title: iOS objc-runtime-new 源码
date: 2021-10-19 17:12:25
tags: 原创
categories: iOS
源码:objc-runtime-new.mm(注:该文件使用了 C++ 模板与命名空间,实际是 Objective-C++ 文件,扩展名为 .mm 而非 .m)
/*
* Copyright (c) 2005-2009 Apple Inc. All Rights Reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
* This file contains Original Code and/or Modifications of Original Code
* as defined in and that are subject to the Apple Public Source License
* Version 2.0 (the 'License'). You may not use this file except in
* compliance with the License. Please obtain a copy of the License at
* http://www.opensource.apple.com/apsl/ and read it before using this
* file.
*
* The Original Code and all software distributed under the License are
* distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
* Please see the License for the specific language governing rights and
* limitations under the License.
*
* @APPLE_LICENSE_HEADER_END@
*/
/***********************************************************************
* objc-runtime-new.m
* Support for new-ABI classes and images.
**********************************************************************/
#if __OBJC2__
#include "DenseMapExtras.h"
#include "objc-private.h"
#include "objc-runtime-new.h"
#include "objc-file.h"
#include "objc-zalloc.h"
#include <Block.h>
#include <objc/message.h>
#include <mach/shared_region.h>
// Cast an opaque Protocol * to the runtime's internal protocol_t *.
#define newprotocol(p) ((protocol_t *)p)

// Forward declarations for helpers defined later in this file.
static void disableTaggedPointers();
static void detach_class(Class cls, bool isMeta);
static void free_class(Class cls);
static IMP addMethod(Class cls, SEL name, IMP imp, const char *types, bool replace);
static void adjustCustomFlagsForMethodChange(Class cls, method_t *meth);
static method_t *search_method_list(const method_list_t *mlist, SEL sel);
template<typename T> static bool method_lists_contains_any(T *mlists, T *end,
                                                           SEL sels[], size_t selcount);
static void flushCaches(Class cls, const char *func, bool (^predicate)(Class c));
static void initializeTaggedPointerObfuscator(void);
#if SUPPORT_FIXUP
static void fixupMessageRef(message_ref_t *msg);
#endif
static Class realizeClassMaybeSwiftAndUnlock(Class cls, mutex_t& lock);
static Class readClass(Class cls, bool headerIsBundle, bool headerIsPreoptimized);

// A category paired with the image (header_info) it was loaded from.
struct locstamped_category_t {
    category_t *cat;
    struct header_info *hi;
};

// Flags for attachCategories(), describing what is being attached where.
enum {
    ATTACH_CLASS               = 1 << 0,  // attach to the class itself
    ATTACH_METACLASS           = 1 << 1,  // attach to the metaclass
    ATTACH_CLASS_AND_METACLASS = 1 << 2,  // attach to both sides
    ATTACH_EXISTING            = 1 << 3,  // class is already realized
};
static void attachCategories(Class cls, const struct locstamped_category_t *cats_list, uint32_t cats_count, int flags);
/***********************************************************************
* Lock management
**********************************************************************/
// The big runtime lock: guards class realization, method list edits,
// the class tables, and most other runtime metadata in this file.
mutex_t runtimeLock;
// Guards the selector uniquing table.
mutex_t selLock;
#if CONFIG_USE_CACHE_LOCK
// Guards method cache updates (only built when caches need a lock).
mutex_t cacheUpdateLock;
#endif
// Recursive because a +load method may trigger loading of more images,
// which re-enters the +load machinery on the same thread.
recursive_mutex_t loadMethodLock;
/***********************************************************************
* Class structure decoding
**********************************************************************/
// Mask that extracts the class_rw_t pointer from class_data_bits_t.
// Exported so debuggers can decode class structures.
const uintptr_t objc_debug_class_rw_data_mask = FAST_DATA_MASK;

/***********************************************************************
* Non-pointer isa decoding
**********************************************************************/
#if SUPPORT_INDEXED_ISA
// Indexed non-pointer isa.
// These are used to mask the ISA and see if its got an index or not.
const uintptr_t objc_debug_indexed_isa_magic_mask  = ISA_INDEX_MAGIC_MASK;
const uintptr_t objc_debug_indexed_isa_magic_value = ISA_INDEX_MAGIC_VALUE;

// die if masks overlap
STATIC_ASSERT((ISA_INDEX_MASK & ISA_INDEX_MAGIC_MASK) == 0);
// die if magic is wrong
STATIC_ASSERT((~ISA_INDEX_MAGIC_MASK & ISA_INDEX_MAGIC_VALUE) == 0);

// Then these are used to extract the index from the ISA.
const uintptr_t objc_debug_indexed_isa_index_mask  = ISA_INDEX_MASK;
const uintptr_t objc_debug_indexed_isa_index_shift = ISA_INDEX_SHIFT;

// Export the same values as absolute symbols so assembly code (and
// out-of-process tools) can reference them without a memory load.
asm("\n .globl _objc_absolute_indexed_isa_magic_mask" \
    "\n _objc_absolute_indexed_isa_magic_mask = " STRINGIFY2(ISA_INDEX_MAGIC_MASK));
asm("\n .globl _objc_absolute_indexed_isa_magic_value" \
    "\n _objc_absolute_indexed_isa_magic_value = " STRINGIFY2(ISA_INDEX_MAGIC_VALUE));
asm("\n .globl _objc_absolute_indexed_isa_index_mask" \
    "\n _objc_absolute_indexed_isa_index_mask = " STRINGIFY2(ISA_INDEX_MASK));
asm("\n .globl _objc_absolute_indexed_isa_index_shift" \
    "\n _objc_absolute_indexed_isa_index_shift = " STRINGIFY2(ISA_INDEX_SHIFT));

// And then we can use that index to get the class from this array. Note
// the size is provided so that clients can ensure the index they get is in
// bounds and not read off the end of the array.
// Defined in the objc-msg-*.s files
// const Class objc_indexed_classes[]

// When we don't have enough bits to store a class*, we can instead store an
// index in to this array. Classes are added here when they are realized.
// Note, an index of 0 is illegal.
uintptr_t objc_indexed_classes_count = 0;

// SUPPORT_INDEXED_ISA
#else
// not SUPPORT_INDEXED_ISA
// These variables exist but are all set to 0 so that they are ignored.
const uintptr_t objc_debug_indexed_isa_magic_mask  = 0;
const uintptr_t objc_debug_indexed_isa_magic_value = 0;
const uintptr_t objc_debug_indexed_isa_index_mask  = 0;
const uintptr_t objc_debug_indexed_isa_index_shift = 0;
Class objc_indexed_classes[1] = { nil };
uintptr_t objc_indexed_classes_count = 0;
// not SUPPORT_INDEXED_ISA
#endif

#if SUPPORT_PACKED_ISA
// Packed non-pointer isa.
asm("\n .globl _objc_absolute_packed_isa_class_mask" \
    "\n _objc_absolute_packed_isa_class_mask = " STRINGIFY2(ISA_MASK));

// a better definition is
//     (uintptr_t)ptrauth_strip((void *)ISA_MASK, ISA_SIGNING_KEY)
// however we know that PAC uses bits outside of MACH_VM_MAX_ADDRESS
// so approximate the definition here to be constant
// Returns the smallest all-ones mask (2^k - 1) that covers every set
// bit of n, i.e. the minimal mask with (n & mask) == n.
template <typename T>
static constexpr T coveringMask(T n) {
    T mask = 0;
    // Grow a low-order run of ones until it covers n. Once mask is
    // all-ones, (n & mask) == n holds for any n, so this terminates.
    while ((n & mask) != n) {
        mask = (mask << 1) | 1;
    }
    return mask;
}
// Clip ISA_MASK to bits below the maximum VM address so the exported
// debugger mask is free of PAC signature bits (approximation of
// ptrauth_strip; see comment above).
const uintptr_t objc_debug_isa_class_mask  = ISA_MASK & coveringMask(MACH_VM_MAX_ADDRESS - 1);
const uintptr_t objc_debug_isa_magic_mask  = ISA_MAGIC_MASK;
const uintptr_t objc_debug_isa_magic_value = ISA_MAGIC_VALUE;

// die if masks overlap
STATIC_ASSERT((ISA_MASK & ISA_MAGIC_MASK) == 0);
// die if magic is wrong
STATIC_ASSERT((~ISA_MAGIC_MASK & ISA_MAGIC_VALUE) == 0);
// die if virtual address space bound goes up
STATIC_ASSERT((~ISA_MASK & MACH_VM_MAX_ADDRESS) == 0 ||
              ISA_MASK + sizeof(void*) == MACH_VM_MAX_ADDRESS);

// SUPPORT_PACKED_ISA
#else
// not SUPPORT_PACKED_ISA
// These variables exist but enforce pointer alignment only.
const uintptr_t objc_debug_isa_class_mask  = (~WORD_MASK);
const uintptr_t objc_debug_isa_magic_mask  = WORD_MASK;
const uintptr_t objc_debug_isa_magic_value = 0;
// not SUPPORT_PACKED_ISA
#endif

/***********************************************************************
* Swift marker bits
**********************************************************************/
// Bit in class_data_bits_t marking a stable-ABI Swift class.
const uintptr_t objc_debug_swift_stable_abi_bit = FAST_IS_SWIFT_STABLE;
/***********************************************************************
* allocatedClasses
* A table of all classes (and metaclasses) which have been allocated
* with objc_allocateClassPair.
**********************************************************************/
namespace objc {
static ExplicitInitDenseSet<Class> allocatedClasses;
}

/***********************************************************************
* _firstRealizedClass
* The root of all realized classes
**********************************************************************/
// Realized root classes form a sibling list starting here; each root's
// subclasses hang off class_rw_t::firstSubclass.
static Class _firstRealizedClass = nil;

/***********************************************************************
* didInitialAttachCategories
* Whether the initial attachment of categories present at startup has
* been done.
**********************************************************************/
static bool didInitialAttachCategories = false;

/***********************************************************************
* didCallDyldNotifyRegister
* Whether the call to _dyld_objc_notify_register has completed.
**********************************************************************/
bool didCallDyldNotifyRegister = false;

/***********************************************************************
* smallMethodIMPMap
* The map from small method pointers to replacement IMPs.
*
* Locking: runtimeLock must be held when accessing this map.
**********************************************************************/
namespace objc {
static objc::LazyInitDenseMap<const method_t *, IMP> smallMethodIMPMap;
}
// Look up a replacement IMP for a small method, or nullptr if none has
// been registered. Caller must hold runtimeLock.
static IMP method_t_remappedImp_nolock(const method_t *m) {
    runtimeLock.assertLocked();
    // get(false): do not create the map if it was never populated.
    if (auto *impTable = objc::smallMethodIMPMap.get(false)) {
        auto found = impTable->find(m);
        if (found != impTable->end())
            return found->second;
    }
    return nullptr;
}
// Public wrapper: fetch this small method's replacement IMP, taking
// runtimeLock first unless the caller already holds it.
IMP method_t::remappedImp(bool needsLock) const {
    ASSERT(isSmall());
    if (!needsLock) {
        return method_t_remappedImp_nolock(this);
    }
    mutex_locker_t guard(runtimeLock);
    return method_t_remappedImp_nolock(this);
}
// Record a replacement IMP for this small method.
// Caller must hold runtimeLock.
void method_t::remapImp(IMP imp) {
    ASSERT(isSmall());
    runtimeLock.assertLocked();
    // get(true): lazily construct the map on first use.
    auto &impTable = *objc::smallMethodIMPMap.get(true);
    impTable[this] = imp;
}
// Return a heap-allocated objc_method_description for a small method,
// memoized per-method in a lazily created map (entries are never freed).
objc_method_description *method_t::getSmallDescription() const {
    static objc::LazyInitDenseMap<const method_t *, objc_method_description *> map;

    mutex_locker_t guard(runtimeLock);

    // Operator[] creates the slot; fill it on first request only.
    objc_method_description *&slot = (*map.get(true))[this];
    if (slot == nullptr) {
        slot = (objc_method_description *)malloc(sizeof(*slot));
        slot->name = name();
        slot->types = (char *)types();
    }
    return slot;
}
/*
  Low two bits of mlist->entsize is used as the fixed-up marker.
    Method lists from shared cache are 1 (uniqued) or 3 (uniqued and sorted).
    (Protocol method lists are not sorted because of their extra parallel data)
    Runtime fixed-up method lists get 3.

  High two bits of protocol->flags is used as the fixed-up marker.
  PREOPTIMIZED VERSION:
    Protocols from shared cache are 1<<30.
    Runtime fixed-up protocols get 1<<30.
  UN-PREOPTIMIZED VERSION:
    Protocols from shared cache are 1<<30.
    Shared cache's fixups are not trusted.
    Runtime fixed-up protocols get 3<<30.
*/

static const uint32_t fixed_up_method_list = 3;
static const uint32_t uniqued_method_list = 1;
// Mutable: disableSharedCacheOptimizations() changes these when the
// shared cache's precomputed fixups cannot be trusted.
static uint32_t fixed_up_protocol = PROTOCOL_FIXED_UP_1;
static uint32_t canonical_protocol = PROTOCOL_IS_CANONICAL;

// Stop trusting shared-cache preoptimization: require both fixed-up
// bits (which only the runtime sets) and never treat a protocol as
// canonical.
void
disableSharedCacheOptimizations(void)
{
    fixed_up_protocol = PROTOCOL_FIXED_UP_1 | PROTOCOL_FIXED_UP_2;
    // Its safe to just set canonical protocol to 0 as we'll never call
    // clearIsCanonical() unless isCanonical() returned true, which can't happen
    // with a 0 mask
    canonical_protocol = 0;
}
// True if this method list's selectors have been uniqued (registered
// with the runtime's selector table).
bool method_list_t::isUniqued() const {
    return (flags() & uniqued_method_list) != 0;
}

// True if this method list has been fully fixed up (uniqued and sorted).
bool method_list_t::isFixedUp() const {
    // Ignore any flags in the top bits, just look at the bottom two.
    return (flags() & 0x3) == fixed_up_method_list;
}

// Mark this method list as fixed up. Caller must hold runtimeLock.
void method_list_t::setFixedUp() {
    runtimeLock.assertLocked();
    ASSERT(!isFixedUp());
    entsizeAndFlags = entsize() | fixed_up_method_list;
}

// True if this protocol has been fixed up. The required bit pattern
// depends on whether shared-cache fixups are trusted (see above).
bool protocol_t::isFixedUp() const {
    return (flags & PROTOCOL_FIXED_UP_MASK) == fixed_up_protocol;
}

// Mark this protocol as fixed up. Caller must hold runtimeLock.
void protocol_t::setFixedUp() {
    runtimeLock.assertLocked();
    ASSERT(!isFixedUp());
    flags = (flags & ~PROTOCOL_FIXED_UP_MASK) | fixed_up_protocol;
}

// True if this protocol is the canonical (shared-cache) definition.
bool protocol_t::isCanonical() const {
    return (flags & canonical_protocol) != 0;
}

// Demote this protocol from canonical status (e.g. when a duplicate
// definition is loaded). Caller must hold runtimeLock.
void protocol_t::clearIsCanonical() {
    runtimeLock.assertLocked();
    ASSERT(isCanonical());
    flags = flags & ~canonical_protocol;
}
// Returns the end iterator for this class's category method lists.
// The class's own (base) method list, if present, is always stored
// last, so the category lists are everything before it.
const method_list_t_authed_ptr<method_list_t> *method_array_t::endCategoryMethodLists(Class cls) const
{
    auto first = beginLists();
    auto last = endLists();

    // No lists at all: nothing to exclude.
    if (first == last) return last;
    // No base method list: every stored list came from a category.
    if (!cls->data()->ro()->baseMethods()) return last;

    // Base methods present: skip the final (base) list.
    return last - 1;
}
// A SEL is an interned C string; recover the char* for logging/compares.
static const char *sel_cname(SEL sel)
{
    return (const char *)(void *)sel;
}

// Total allocation size of a protocol_list_t with plist->count entries
// (header plus the trailing array of protocol pointers).
static size_t protocol_list_size(const protocol_list_t *plist)
{
    return sizeof(protocol_list_t) + plist->count * sizeof(protocol_t *);
}

// free() p only if it is a live malloc allocation; pointers into
// read-only image data have malloc_size() == 0 and are left alone.
static void try_free(const void *p)
{
    if (p && malloc_size(p)) free((void *)p);
}
// Callback invoked when a class is duplicated/subclassed so that other
// runtimes (e.g. Swift) can fix up their portion of the new class.
using ClassCopyFixupHandler = void (*)(Class _Nonnull oldClass,
                                       Class _Nonnull newClass);
// Normally there's only one handler registered.
static GlobalSmallVector<ClassCopyFixupHandler, 1> classCopyFixupHandlers;

// Register an additional class-copy fixup handler.
// Handlers are called in registration order from alloc_class_for_subclass().
void _objc_setClassCopyFixupHandler(void (* _Nonnull newFixupHandler)
    (Class _Nonnull oldClass, Class _Nonnull newClass)) {
    mutex_locker_t lock(runtimeLock);

    classCopyFixupHandlers.append(newFixupHandler);
}
// Allocate storage for a new subclass of supercls with extraBytes of
// indexed ivar space. Plain ObjC superclasses get a zeroed objc_class;
// Swift superclasses require copying the superclass's entire extended
// layout (prefix and suffix around the objc_class) into the new class.
static Class
alloc_class_for_subclass(Class supercls, size_t extraBytes)
{
    if (!supercls || !supercls->isAnySwift()) {
        return _calloc_class(sizeof(objc_class) + extraBytes);
    }

    // Superclass is a Swift class. New subclass must duplicate its extra bits.

    // Allocate the new class, with space for super's prefix and suffix
    // and self's extraBytes.
    swift_class_t *swiftSupercls = (swift_class_t *)supercls;
    size_t superSize = swiftSupercls->classSize;
    void *superBits = swiftSupercls->baseAddress();
    void *bits = malloc(superSize + extraBytes);

    // Copy all of the superclass's data to the new class.
    memcpy(bits, superBits, superSize);

    // Erase the objc data and the Swift description in the new class.
    // classAddressOffset locates the objc_class within the Swift layout.
    swift_class_t *swcls = (swift_class_t *)
        ((uint8_t *)bits + swiftSupercls->classAddressOffset);
    bzero(swcls, sizeof(objc_class));
    swcls->description = nil;

    // Give registered handlers (e.g. the Swift runtime) a chance to
    // repair their parts of the copied metadata.
    for (auto handler : classCopyFixupHandlers) {
        handler(supercls, (Class)swcls);
    }

    // Mark this class as Swift-enhanced.
    if (supercls->isSwiftStable()) {
        swcls->bits.setIsSwiftStable();
    }
    if (supercls->isSwiftLegacy()) {
        swcls->bits.setIsSwiftLegacy();
    }

    return (Class)swcls;
}
/***********************************************************************
* object_getIndexedIvars.
**********************************************************************/
// Returns a pointer to the extra bytes allocated past the object
// (instance), past the objc_class (plain class), or past the Swift
// suffix (Swift class). Returns nil for tagged pointers and nil.
void *object_getIndexedIvars(id obj)
{
    uint8_t *base = (uint8_t *)obj;

    if (obj->isTaggedPointerOrNil()) return nil;

    // Ordinary instance: extra bytes follow the aligned instance size.
    if (!obj->isClass()) return base + obj->ISA()->alignedInstanceSize();

    Class cls = (Class)obj;
    if (!cls->isAnySwift()) return base + sizeof(objc_class);

    // Swift class: extra bytes follow the whole (word-aligned) Swift
    // class layout, which starts classAddressOffset bytes before `cls`.
    swift_class_t *swcls = (swift_class_t *)cls;
    return base - swcls->classAddressOffset + word_align(swcls->classSize);
}
/***********************************************************************
* make_ro_writeable
* Reallocates rw->ro if necessary to make it writeable.
* Locking: runtimeLock must be held by the caller.
**********************************************************************/
static class_ro_t *make_ro_writeable(class_rw_t *rw)
{
    runtimeLock.assertLocked();

    // On first mutation, swap the (possibly read-only, shared) ro for a
    // private heap copy and remember that we did so.
    if (!(rw->flags & RW_COPIED_RO)) {
        rw->set_ro(rw->ro()->duplicate());
        rw->flags |= RW_COPIED_RO;
    }
    return const_cast<class_ro_t *>(rw->ro());
}
/***********************************************************************
* dataSegmentsContain
* Returns true if the given address lies within a data segment in any
* loaded image.
**********************************************************************/
NEVER_INLINE
static bool
dataSegmentsContain(Class cls)
{
    uint32_t index;
    if (objc::dataSegmentsRanges.find((uintptr_t)cls, index)) {
        // if the class is realized (hence has a class_rw_t),
        // memorize where we found the range
        // (the witness lets isKnownClass() skip this search next time)
        if (cls->isRealized()) {
            cls->data()->witness = (uint16_t)index;
        }
        return true;
    }
    return false;
}
/***********************************************************************
* isKnownClass
* Return true if the class is known to the runtime (located within the
* shared cache, within the data segment of a loaded image, or has been
* allocated with obj_allocateClassPair).
*
* The result of this operation is cached on the class in a "witness"
* value that is cheaply checked in the fastpath.
**********************************************************************/
ALWAYS_INLINE
static bool
isKnownClass(Class cls)
{
    // Fast path: the witness index stored by dataSegmentsContain()
    // still names a range containing this class.
    if (fastpath(objc::dataSegmentsRanges.contains(cls->data()->witness, (uintptr_t)cls))) {
        return true;
    }
    // Slow path: dynamically allocated classes, then a full range search
    // (which refreshes the witness on success).
    auto &set = objc::allocatedClasses.get();
    return set.find(cls) != set.end() || dataSegmentsContain(cls);
}
/***********************************************************************
* addClassTableEntry
* Add a class to the table of all classes. If addMeta is true,
* automatically adds the metaclass of the class as well.
* Locking: runtimeLock must be held by the caller.
**********************************************************************/
static void
addClassTableEntry(Class cls, bool addMeta = true)
{
    runtimeLock.assertLocked();

    // This class is allowed to be a known class via the shared cache or via
    // data segments, but it is not allowed to be in the dynamic table already.
    auto &set = objc::allocatedClasses.get();

    ASSERT(set.find(cls) == set.end());

    // Only dynamically allocated classes need a table entry; image-backed
    // classes are already discoverable via their data segment.
    if (!isKnownClass(cls))
        set.insert(cls);
    // Recurse once (with addMeta=false) to register the metaclass too.
    if (addMeta)
        addClassTableEntry(cls->ISA(), false);
}
/***********************************************************************
* checkIsKnownClass
* Checks the given class against the list of all known classes. Dies
* with a fatal error if the class is not known.
* Locking: runtimeLock must be held by the caller.
**********************************************************************/
ALWAYS_INLINE
static void
checkIsKnownClass(Class cls)
{
    if (fastpath(isKnownClass(cls))) return;
    _objc_fatal("Attempt to use unknown class %p.", cls);
}
/***********************************************************************
* classNSObject
* Returns class NSObject.
* Locking: none
**********************************************************************/
static Class classNSObject(void)
{
    // Reference the statically-linked NSObject class symbol directly;
    // no lookup tables or locks needed.
    extern objc_class OBJC_CLASS_$_NSObject;
    return (Class)&OBJC_CLASS_$_NSObject;
}

// Returns the NSObject metaclass, same technique as classNSObject().
static Class metaclassNSObject(void)
{
    extern objc_class OBJC_METACLASS_$_NSObject;
    return (Class)&OBJC_METACLASS_$_NSObject;
}
/***********************************************************************
* printReplacements
* Implementation of PrintReplacedMethods / OBJC_PRINT_REPLACED_METHODS.
* Warn about methods from cats that override other methods in cats or cls.
* Assumes no methods from cats have been added to cls yet.
**********************************************************************/
__attribute__((cold, noinline))
static void
printReplacements(Class cls, const locstamped_category_t *cats_list, uint32_t cats_count)
{
    uint32_t c;
    bool isMeta = cls->isMetaClass();

    // Newest categories are LAST in cats
    // Later categories override earlier ones.
    for (c = 0; c < cats_count; c++) {
        category_t *cat = cats_list[c].cat;

        method_list_t *mlist = cat->methodsForMeta(isMeta);
        if (!mlist) continue;

        for (const auto& meth : *mlist) {
            // Selectors in unattached categories may not be uniqued yet;
            // register to get canonical SELs for comparison.
            SEL s = sel_registerName(sel_cname(meth.name()));

            // Search for replaced methods in method lookup order.
            // Complain about the first duplicate only.

            // Look for method in earlier categories
            for (uint32_t c2 = 0; c2 < c; c2++) {
                category_t *cat2 = cats_list[c2].cat;

                const method_list_t *mlist2 = cat2->methodsForMeta(isMeta);
                if (!mlist2) continue;

                for (const auto& meth2 : *mlist2) {
                    SEL s2 = sel_registerName(sel_cname(meth2.name()));
                    if (s == s2) {
                        logReplacedMethod(cls->nameForLogging(), s,
                                          cls->isMetaClass(), cat->name,
                                          meth2.imp(false), meth.imp(false));
                        // One complaint per method: skip remaining checks.
                        goto complained;
                    }
                }
            }

            // Look for method in cls
            for (const auto& meth2 : cls->data()->methods()) {
                SEL s2 = sel_registerName(sel_cname(meth2.name()));
                if (s == s2) {
                    logReplacedMethod(cls->nameForLogging(), s,
                                      cls->isMetaClass(), cat->name,
                                      meth2.imp(false), meth.imp(false));
                    goto complained;
                }
            }

        complained:
            ;
        }
    }
}
/***********************************************************************
* unreasonableClassCount
* Provides an upper bound for any iteration of classes,
* to prevent spins when runtime metadata is corrupted.
**********************************************************************/
static unsigned unreasonableClassCount()
{
    runtimeLock.assertLocked();

    int knownClasses = NXCountMapTable(gdb_objc_realized_classes) +
        getPreoptimizedClassUnreasonableCount();

    // Provide lots of slack here. Some iterations touch metaclasses too.
    // Some iterations backtrack (like realized class iteration).
    // We don't need an efficient bound, merely one that prevents spins.
    return (knownClasses + 1) * 16;
}
/***********************************************************************
* Class enumerators
* The passed in block returns `false` if subclasses can be skipped
* Locking: runtimeLock must be held by the caller.
**********************************************************************/
// Depth-first walk of `top` and its realized subclass tree via the
// firstSubclass/nextSiblingClass links. `count` is a shared budget
// (decremented here, including while backtracking) that aborts the
// process if the tree is corrupted into a cycle.
static inline void
foreach_realized_class_and_subclass_2(Class top, unsigned &count,
                                      bool skip_metaclass,
                                      bool (^code)(Class) __attribute((noescape)))
{
    Class cls = top;

    runtimeLock.assertLocked();
    ASSERT(top);

    while (1) {
        if (--count == 0) {
            _objc_fatal("Memory corruption in class list.");
        }

        bool skip_subclasses;

        if (skip_metaclass && cls->isMetaClass()) {
            skip_subclasses = true;
        } else {
            // The block's return value decides whether to descend.
            skip_subclasses = !code(cls);
        }

        if (!skip_subclasses && cls->data()->firstSubclass) {
            // Descend to the first child.
            cls = cls->data()->firstSubclass;
        } else {
            // Backtrack to the nearest ancestor that has an unvisited
            // sibling, stopping when we return to `top`.
            while (!cls->data()->nextSiblingClass && cls != top) {
                cls = cls->getSuperclass();
                if (--count == 0) {
                    _objc_fatal("Memory corruption in class list.");
                }
            }
            if (cls == top) break;
            cls = cls->data()->nextSiblingClass;
        }
    }
}
// Enumerates a class and all of its realized subclasses.
static void
foreach_realized_class_and_subclass(Class top, bool (^code)(Class) __attribute((noescape)))
{
    unsigned remaining = unreasonableClassCount();
    foreach_realized_class_and_subclass_2(top, remaining, false, code);
}
// Enumerates all realized classes and metaclasses.
static void
foreach_realized_class_and_metaclass(bool (^code)(Class) __attribute((noescape)))
{
    // One shared budget across all root-class trees.
    unsigned remaining = unreasonableClassCount();
    Class root = _firstRealizedClass;
    while (root != nil) {
        foreach_realized_class_and_subclass_2(root, remaining, false, code);
        root = root->data()->nextSiblingClass;
    }
}
// Enumerates all realized classes (ignoring metaclasses).
static void
foreach_realized_class(bool (^code)(Class) __attribute((noescape)))
{
    // One shared budget across all root-class trees.
    unsigned remaining = unreasonableClassCount();
    Class root = _firstRealizedClass;
    while (root != nil) {
        // skip_metaclass=true: visit only the class side of each tree.
        foreach_realized_class_and_subclass_2(root, remaining, true, code);
        root = root->data()->nextSiblingClass;
    }
}
/***********************************************************************
* Method Scanners / Optimization tracking
* Implementation of scanning for various implementations of methods.
**********************************************************************/

namespace objc {

// The groups of "interesting" selectors the scanners track.
enum SelectorBundle {
    AWZ,   // +alloc / +allocWithZone:
    RR,    // retain/release family
    Core,  // core NSObject methods (+new, -self, -class, ...)
};

namespace scanner {

// The current state of NSObject swizzling for every scanner
//
// It allows for cheap checks of global swizzles, and also lets
// things like IMP Swizzling before NSObject has been initialized
// to be remembered, as setInitialized() would miss these.
//
// Every pair of bits describes a SelectorBundle.
// even bits: is NSObject class swizzled for this bundle
// odd bits: is NSObject meta class swizzled for this bundle
static uintptr_t NSObjectSwizzledMask;

// Mask bit in NSObjectSwizzledMask for (bundle, class-or-metaclass).
static ALWAYS_INLINE uintptr_t
swizzlingBit(SelectorBundle bundle, bool isMeta)
{
    return 1UL << (2 * bundle + isMeta);
}

// Log (OBJC_PRINT_CUSTOM_*) that cls was marked custom for bundle.
static void __attribute__((cold, noinline))
printCustom(Class cls, SelectorBundle bundle, bool inherited)
{
    static char const * const SelectorBundleName[] = {
        [AWZ]  = "CUSTOM AWZ",
        [RR]   = "CUSTOM RR",
        [Core] = "CUSTOM Core",
    };

    _objc_inform("%s: %s%s%s", SelectorBundleName[bundle],
                 cls->nameForLogging(),
                 cls->isMetaClass() ? " (meta)" : "",
                 inherited ? " (inherited)" : "");
}

// Which side(s) of the class pair a scanner is interested in.
enum class Scope { Instances, Classes, Both };
// Shared scanner implementation (CRTP). `Traits` (the deriving struct)
// supplies isCustom/setCustom/setDefault bit accessors and the
// method-list scanning predicate; Bundle/ShouldPrint/Domain select the
// selector group, logging, and which side of the class pair to track.
template <typename Traits, SelectorBundle Bundle, bool &ShouldPrint, Scope Domain = Scope::Both>
class Mixin {

    // work around compiler being broken with templates using Class/objc_class,
    // probably some weird confusion with Class being builtin
    ALWAYS_INLINE static objc_class *as_objc_class(Class cls) {
        return (objc_class *)cls;
    }

    // Mark cls (and its initialized realized subclasses) as custom for
    // this bundle. Stops descending under classes already custom or not
    // yet initialized (setInitialized() will handle those later).
    static void
    setCustomRecursively(Class cls, bool inherited = false)
    {
        foreach_realized_class_and_subclass(cls, [=](Class c){
            if (c != cls && !as_objc_class(c)->isInitialized()) {
                // Subclass not yet initialized. Wait for setInitialized() to do it
                return false;
            }
            if (Traits::isCustom(c)) {
                return false;
            }
            Traits::setCustom(c);
            if (ShouldPrint) {
                // NOTE(review): this logs the root `cls` for every class
                // visited; upstream objc4 passes the visited class `c`
                // here -- confirm against the original source.
                printCustom(cls, Bundle, inherited || c != cls);
            }
            return true;
        });
    }

    // Has NSObject (class or metaclass side) been swizzled for Bundle?
    static bool
    isNSObjectSwizzled(bool isMeta)
    {
        return NSObjectSwizzledMask & swizzlingBit(Bundle, isMeta);
    }

    // Record an NSObject swizzle and, if NSObject is already
    // initialized, propagate the custom bit down its subclass tree.
    static void
    setNSObjectSwizzled(Class NSOClass, bool isMeta)
    {
        NSObjectSwizzledMask |= swizzlingBit(Bundle, isMeta);
        if (as_objc_class(NSOClass)->isInitialized()) {
            setCustomRecursively(NSOClass);
        }
    }

    // Slow path for method_exchangeImplementations with an unknown
    // class: check whether meth belongs to NSObject's class or
    // metaclass method lists and record the swizzle if so.
    static void
    scanChangedMethodForUnknownClass(const method_t *meth)
    {
        Class cls;

        cls = classNSObject();
        if (Domain != Scope::Classes && !isNSObjectSwizzled(NO)) {
            for (const auto &meth2: as_objc_class(cls)->data()->methods()) {
                if (meth == &meth2) {
                    setNSObjectSwizzled(cls, NO);
                    break;
                }
            }
        }

        cls = metaclassNSObject();
        if (Domain != Scope::Instances && !isNSObjectSwizzled(YES)) {
            for (const auto &meth2: as_objc_class(cls)->data()->methods()) {
                if (meth == &meth2) {
                    setNSObjectSwizzled(cls, YES);
                    break;
                }
            }
        }
    }

    // Decide whether a class being initialized is custom or default for
    // this bundle, checking (in order): global NSObject swizzles, known
    // default classes (categories still scanned), custom root classes,
    // inherited customness, and finally the class's own method lists.
    static void
    scanAddedClassImpl(Class cls, bool isMeta)
    {
        bool setCustom = NO, inherited = NO;

        if (isNSObjectSwizzled(isMeta)) {
            setCustom = YES;
        } else if (Traits::knownClassHasDefaultImpl(cls, isMeta)) {
            // This class is known to have the default implementations,
            // but we need to check categories.
            auto &methods = as_objc_class(cls)->data()->methods();
            setCustom = Traits::scanMethodLists(methods.beginCategoryMethodLists(),
                                                methods.endCategoryMethodLists(cls));
        } else if (!isMeta && !as_objc_class(cls)->getSuperclass()) {
            // Custom Root class
            setCustom = YES;
        } else if (Traits::isCustom(as_objc_class(cls)->getSuperclass())) {
            // Superclass is custom, therefore we are too.
            setCustom = YES;
            inherited = YES;
        } else {
            // Not NSObject.
            auto &methods = as_objc_class(cls)->data()->methods();
            setCustom = Traits::scanMethodLists(methods.beginLists(),
                                                methods.endLists());
        }
        if (slowpath(setCustom)) {
            if (ShouldPrint) printCustom(cls, Bundle, inherited);
        } else {
            Traits::setDefault(cls);
        }
    }

public:
    static bool knownClassHasDefaultImpl(Class cls, bool isMeta) {
        // Typically only NSObject has default implementations.
        // Allow this to be extended by overriding (to allow
        // SwiftObject, for example).
        Class NSOClass = (isMeta ? metaclassNSObject() : classNSObject());
        return cls == NSOClass;
    }

    // Scan a class that is about to be marked Initialized for particular
    // bundles of selectors, and mark the class and its children
    // accordingly.
    //
    // This also handles inheriting properties from its superclass.
    //
    // Caller: objc_class::setInitialized()
    static void
    scanInitializedClass(Class cls, Class metacls)
    {
        if (Domain != Scope::Classes) {
            scanAddedClassImpl(cls, false);
        }
        if (Domain != Scope::Instances) {
            scanAddedClassImpl(metacls, true);
        }
    }

    // Inherit various properties from the superclass when a class
    // is being added to the graph.
    //
    // Caller: addSubclass()
    static void
    scanAddedSubClass(Class subcls, Class supercls)
    {
        if (slowpath(Traits::isCustom(supercls) && !Traits::isCustom(subcls))) {
            setCustomRecursively(subcls, true);
        }
    }

    // Scan Method lists for selectors that would override things
    // in a Bundle.
    //
    // This is used to detect when categories override problematic selectors
    // are injected in a class after it has been initialized.
    //
    // Caller: prepareMethodLists()
    static void
    scanAddedMethodLists(Class cls, method_list_t **mlists, int count)
    {
        if (slowpath(Traits::isCustom(cls))) {
            // Already custom; nothing can change that.
            return;
        }
        if (slowpath(Traits::scanMethodLists(mlists, mlists + count))) {
            setCustomRecursively(cls);
        }
    }

    // Handle IMP Swizzling (the IMP for an existing method being changed).
    //
    // In almost all cases, IMP swizzling does not affect custom bits.
    // Custom search will already find the method whether or not
    // it is swizzled, so it does not transition from non-custom to custom.
    //
    // The only cases where IMP swizzling can affect the custom bits is
    // if the swizzled method is one of the methods that is assumed to be
    // non-custom. These special cases are listed in setInitialized().
    // We look for such cases here.
    //
    // Caller: Swizzling methods via adjustCustomFlagsForMethodChange()
    static void
    scanChangedMethod(Class cls, const method_t *meth)
    {
        if (fastpath(!Traits::isInterestingSelector(meth->name()))) {
            return;
        }

        if (cls) {
            bool isMeta = as_objc_class(cls)->isMetaClass();
            if (isMeta && Domain != Scope::Instances) {
                if (cls == metaclassNSObject() && !isNSObjectSwizzled(isMeta)) {
                    setNSObjectSwizzled(cls, isMeta);
                }
            }
            if (!isMeta && Domain != Scope::Classes) {
                if (cls == classNSObject() && !isNSObjectSwizzled(isMeta)) {
                    setNSObjectSwizzled(cls, isMeta);
                }
            }
        } else {
            // We're called from method_exchangeImplementations, only NSObject
            // class and metaclass may be problematic (exchanging the default
            // builtin IMP of an interesting selector, is a swizzling that,
            // may flip our scanned property. For other classes, the previous
            // value had already flipped the property).
            //
            // However, as we don't know the class, we need to scan all of
            // NSObject class and metaclass methods (this is SLOW).
            scanChangedMethodForUnknownClass(meth);
        }
    }
};
} // namespace scanner
// AWZ methods: +alloc / +allocWithZone:
// Scope::Classes: only class methods matter, so only the metaclass
// side is tracked.
struct AWZScanner : scanner::Mixin<AWZScanner, AWZ, PrintCustomAWZ, scanner::Scope::Classes> {
    static bool isCustom(Class cls) {
        return cls->hasCustomAWZ();
    }
    static void setCustom(Class cls) {
        cls->setHasCustomAWZ();
    }
    static void setDefault(Class cls) {
        cls->setHasDefaultAWZ();
    }
    static bool isInterestingSelector(SEL sel) {
        return sel == @selector(alloc) || sel == @selector(allocWithZone:);
    }
    // True if any of the method lists implements alloc/allocWithZone:.
    template<typename T>
    static bool scanMethodLists(T *mlists, T *end) {
        SEL sels[2] = { @selector(alloc), @selector(allocWithZone:), };
        return method_lists_contains_any(mlists, end, sels, 2);
    }
};
// Retain/Release methods that are extremely rarely overridden
//
// retain/release/autorelease/retainCount/
// _tryRetain/_isDeallocating/retainWeakReference/allowsWeakReference
// Without non-pointer isa, only instance-side RR matters (Scope::Instances).
struct RRScanner : scanner::Mixin<RRScanner, RR, PrintCustomRR
#if !SUPPORT_NONPOINTER_ISA
, scanner::Scope::Instances
#endif
> {
    static bool isCustom(Class cls) {
        return cls->hasCustomRR();
    }
    static void setCustom(Class cls) {
        cls->setHasCustomRR();
    }
    static void setDefault(Class cls) {
        cls->setHasDefaultRR();
    }
    static bool isInterestingSelector(SEL sel) {
        return sel == @selector(retain) ||
               sel == @selector(release) ||
               sel == @selector(autorelease) ||
               sel == @selector(_tryRetain) ||
               sel == @selector(_isDeallocating) ||
               sel == @selector(retainCount) ||
               sel == @selector(allowsWeakReference) ||
               sel == @selector(retainWeakReference);
    }
    // True if any of the method lists implements an RR-family selector.
    template <typename T>
    static bool scanMethodLists(T *mlists, T *end) {
        SEL sels[8] = {
            @selector(retain),
            @selector(release),
            @selector(autorelease),
            @selector(_tryRetain),
            @selector(_isDeallocating),
            @selector(retainCount),
            @selector(allowsWeakReference),
            @selector(retainWeakReference),
        };
        return method_lists_contains_any(mlists, end, sels, 8);
    }
};
// Core NSObject methods that are extremely rarely overridden
//
// +new, ±class, ±self, ±isKindOfClass:, ±respondsToSelector:
struct CoreScanner : scanner::Mixin<CoreScanner, Core, PrintCustomCore> {
    // Extends the default: the Swift root class _SwiftObject also has
    // the default core implementations.
    static bool knownClassHasDefaultImpl(Class cls, bool isMeta) {
        if (scanner::Mixin<CoreScanner, Core, PrintCustomCore>::knownClassHasDefaultImpl(cls, isMeta))
            return true;
        if ((cls->isRootClass() || cls->isRootMetaclass())
            && strcmp(cls->mangledName(), "_TtCs12_SwiftObject") == 0)
            return true;

        return false;
    }

    static bool isCustom(Class cls) {
        return cls->hasCustomCore();
    }
    static void setCustom(Class cls) {
        cls->setHasCustomCore();
    }
    static void setDefault(Class cls) {
        cls->setHasDefaultCore();
    }
    static bool isInterestingSelector(SEL sel) {
        return sel == @selector(new) ||
               sel == @selector(self) ||
               sel == @selector(class) ||
               sel == @selector(isKindOfClass:) ||
               sel == @selector(respondsToSelector:);
    }
    // True if any of the method lists implements a core selector.
    template <typename T>
    static bool scanMethodLists(T *mlists, T *end) {
        SEL sels[5] = {
            @selector(new),
            @selector(self),
            @selector(class),
            @selector(isKindOfClass:),
            @selector(respondsToSelector:)
        };
        return method_lists_contains_any(mlists, end, sels, 5);
    }
};
// A list of unattached categories belonging to one class.
//
// Optimized for the common case of a single category: the union either
// stores one locstamped_category_t inline (is_array == 0) or a malloc'ed
// array of them. The bitfields alias locstamped_category_t::hi, so a
// zero-initialized union reads as "empty inline value, not an array".
class category_list : nocopy_t {
    union {
        locstamped_category_t lc;
        struct {
            locstamped_category_t *array;
            // this aliases with locstamped_category_t::hi
            // which is an aliased pointer
            uint32_t is_array : 1;
            uint32_t count : 31;
            uint32_t size : 32;
        };
    } _u;

public:
    category_list() : _u{{nullptr, nullptr}} { }
    category_list(locstamped_category_t lc) : _u{{lc}} { }
    // Move constructor: steal the other list's storage, leaving it empty.
    category_list(category_list &&other) : category_list() {
        std::swap(_u, other._u);
    }
    ~category_list()
    {
        if (_u.is_array) {
            free(_u.array);
        }
    }

    // Number of categories stored (0 or 1 for the inline representation).
    uint32_t count() const
    {
        if (_u.is_array) return _u.count;
        return _u.lc.cat ? 1 : 0;
    }

    // Byte size of an array holding `size` entries.
    uint32_t arrayByteSize(uint32_t size) const
    {
        return sizeof(locstamped_category_t) * size;
    }

    // Pointer to the first entry; valid for both representations.
    const locstamped_category_t *array() const
    {
        return _u.is_array ? _u.array : &_u.lc;
    }

    // Append one category, promoting the inline value to a malloc'ed
    // array (and growing that array) as needed.
    void append(locstamped_category_t lc)
    {
        if (_u.is_array) {
            if (_u.count == _u.size) {
                // Have a typical malloc growth:
                // - size <= 8: grow by 2
                // - size <= 16: grow by 4
                // - size <= 32: grow by 8
                // ... etc
                _u.size += _u.size < 8 ? 2 : 1 << (fls(_u.size) - 2);
                _u.array = (locstamped_category_t *)reallocf(_u.array, arrayByteSize(_u.size));
            }
            _u.array[_u.count++] = lc;
        } else if (_u.lc.cat == NULL) {
            // Empty inline slot: store directly.
            _u.lc = lc;
        } else {
            // Promote single inline entry to a 2-element array.
            locstamped_category_t *arr = (locstamped_category_t *)malloc(arrayByteSize(2));
            arr[0] = _u.lc;
            arr[1] = lc;

            _u.array = arr;
            _u.is_array = true;
            _u.count = 2;
            _u.size = 2;
        }
    }

    // Remove the entry for cat, if present, preserving list order.
    void erase(category_t *cat)
    {
        if (_u.is_array) {
            for (int i = 0; i < _u.count; i++) {
                if (_u.array[i].cat == cat) {
                    // shift entries to preserve list order
                    memmove(&_u.array[i], &_u.array[i+1],
                            arrayByteSize(_u.count - i - 1));
                    // FIX: shrink the element count. Without this the last
                    // entry remained duplicated after the shift and count()
                    // overreported, so callers checking count() == 0 (see
                    // eraseCategoryForClass) could never observe emptiness.
                    _u.count--;
                    return;
                }
            }
        } else if (_u.lc.cat == cat) {
            _u.lc.cat = NULL;
            _u.lc.hi = NULL;
        }
    }
};
// Global registry mapping a class to the categories that were discovered
// before the class was realized ("unattached" categories). Entries are
// consumed (attached and erased) when the class is methodized.
class UnattachedCategories : public ExplicitInitDenseMap<Class, category_list>
{
public:
    // Record category lc as pending attachment to cls.
    // Locking: runtimeLock must be held by the caller.
    void addForClass(locstamped_category_t lc, Class cls)
    {
        runtimeLock.assertLocked();

        if (slowpath(PrintConnecting)) {
            _objc_inform("CLASS: found category %c%s(%s)",
                         cls->isMetaClassMaybeUnrealized() ? '+' : '-',
                         cls->nameForLogging(), lc.cat->name);
        }

        // try_emplace inserts a single-entry list; if cls already had an
        // entry, append to the existing list instead.
        auto result = get().try_emplace(cls, lc);
        if (!result.second) {
            result.first->second.append(lc);
        }
    }

    // Attach all categories registered under `previously` to cls
    // (and/or its metaclass, per flags), then drop the registry entry.
    // `previously` differs from cls when a class has been reallocated.
    // Locking: runtimeLock must be held by the caller.
    void attachToClass(Class cls, Class previously, int flags)
    {
        runtimeLock.assertLocked();
        ASSERT((flags & ATTACH_CLASS) ||
               (flags & ATTACH_METACLASS) ||
               (flags & ATTACH_CLASS_AND_METACLASS));

        auto &map = get();
        auto it = map.find(previously);

        if (it != map.end()) {
            category_list &list = it->second;
            if (flags & ATTACH_CLASS_AND_METACLASS) {
                // Attach the same list to both the class and its metaclass.
                int otherFlags = flags & ~ATTACH_CLASS_AND_METACLASS;
                attachCategories(cls, list.array(), list.count(), otherFlags | ATTACH_CLASS);
                attachCategories(cls->ISA(), list.array(), list.count(), otherFlags | ATTACH_METACLASS);
            } else {
                attachCategories(cls, list.array(), list.count(), flags);
            }
            map.erase(it);
        }
    }

    // Remove one pending category for cls (e.g. when its image unloads).
    // Locking: runtimeLock must be held by the caller.
    void eraseCategoryForClass(category_t *cat, Class cls)
    {
        runtimeLock.assertLocked();

        auto &map = get();
        auto it = map.find(cls);
        if (it != map.end()) {
            category_list &list = it->second;
            list.erase(cat);
            // Drop the map entry entirely once the list is empty.
            if (list.count() == 0) {
                map.erase(it);
            }
        }
    }

    // Remove all pending categories for cls.
    // Locking: runtimeLock must be held by the caller.
    void eraseClass(Class cls)
    {
        runtimeLock.assertLocked();

        get().erase(cls);
    }
};
static UnattachedCategories unattachedCategories;
} // namespace objc
// True if cls was loaded from a bundle, per its compile-time ro flags.
static bool isBundleClass(Class cls)
{
    uint32_t roFlags = cls->data()->ro()->flags;
    return (roFlags & RO_FROM_BUNDLE) != 0;
}
// Prepare a method list for runtime use: intern (unique) every selector
// and sort the entries by selector address so lookups can binary-search.
// bundleCopy: selector names must be copied because they come from a bundle.
// sort: whether to sort the list after uniquing.
// Locking: runtimeLock must be held by the caller.
static void
fixupMethodList(method_list_t *mlist, bool bundleCopy, bool sort)
{
    runtimeLock.assertLocked();
    ASSERT(!mlist->isFixedUp());

    // fixme lock less in attachMethodLists ?
    // dyld3 may have already uniqued, but not sorted, the list
    if (!mlist->isUniqued()) {
        mutex_locker_t lock(selLock);

        // Unique selectors in list.
        for (auto& meth : *mlist) {
            const char *name = sel_cname(meth.name());
            meth.setName(sel_registerNameNoLock(name, bundleCopy));
        }
    }

    // Sort by selector address.
    // Don't try to sort small lists, as they're immutable.
    // Don't try to sort big lists of nonstandard size, as stable_sort
    // won't copy the entries properly.
    if (sort && !mlist->isSmallList() && mlist->entsize() == method_t::bigSize) {
        method_t::SortBySELAddress sorter;
        std::stable_sort(&mlist->begin()->big(), &mlist->end()->big(), sorter);
    }

    // Mark method list as uniqued and sorted.
    // Can't mark small lists, since they're immutable.
    if (!mlist->isSmallList()) {
        mlist->setFixedUp();
    }
}
// Prepare addedCount method lists for attachment to cls: fix them up,
// invalidate preoptimized caches that the new methods would stale-out,
// and rescan the custom-RR/AWZ/Core flags if the class is initialized.
// baseMethods: true when the lists are the class's own compile-time methods.
// methodsFromBundle: true when the lists come from a bundle (affects
// selector copying in fixupMethodList).
// Locking: runtimeLock must be held by the caller.
static void
prepareMethodLists(Class cls, method_list_t **addedLists, int addedCount,
                   bool baseMethods, bool methodsFromBundle, const char *why)
{
    runtimeLock.assertLocked();

    if (addedCount == 0) return;

    // There exist RR/AWZ/Core special cases for some class's base methods.
    // But this code should never need to scan base methods for RR/AWZ/Core:
    // default RR/AWZ/Core cannot be set before setInitialized().
    // Therefore we need not handle any special cases here.
    if (baseMethods) {
        ASSERT(cls->hasCustomAWZ() && cls->hasCustomRR() && cls->hasCustomCore());
    } else if (cls->cache.isConstantOptimizedCache()) {
        // A shared-cache-built constant cache cannot absorb new methods;
        // force this class (and subclasses) off the preopt cache path.
        cls->setDisallowPreoptCachesRecursively(why);
    } else if (cls->allowsPreoptInlinedSels()) {
#if CONFIG_USE_PREOPT_CACHES
        // If any added method matches a selector that dyld inlined into
        // preoptimized caches, those inlined entries are now wrong.
        SEL *sels = (SEL *)objc_opt_offsets[OBJC_OPT_INLINED_METHODS_START];
        SEL *sels_end = (SEL *)objc_opt_offsets[OBJC_OPT_INLINED_METHODS_END];
        if (method_lists_contains_any(addedLists, addedLists + addedCount, sels, sels_end - sels)) {
            cls->setDisallowPreoptInlinedSelsRecursively(why);
        }
#endif
    }

    // Add method lists to array.
    // Reallocate un-fixed method lists.
    // The new methods are PREPENDED to the method list array.
    for (int i = 0; i < addedCount; i++) {
        method_list_t *mlist = addedLists[i];
        ASSERT(mlist);

        // Fixup selectors if necessary
        if (!mlist->isFixedUp()) {
            fixupMethodList(mlist, methodsFromBundle, true/*sort*/);
        }
    }

    // If the class is initialized, then scan for method implementations
    // tracked by the class's flags. If it's not initialized yet,
    // then objc_class::setInitialized() will take care of it.
    if (cls->isInitialized()) {
        objc::AWZScanner::scanAddedMethodLists(cls, addedLists, addedCount);
        objc::RRScanner::scanAddedMethodLists(cls, addedLists, addedCount);
        objc::CoreScanner::scanAddedMethodLists(cls, addedLists, addedCount);
    }
}
// Allocate the writable extension (class_rw_ext_t) for this class_rw_t,
// seeding it with the class's compile-time method/property/protocol lists.
// deepCopy: duplicate the base method list instead of sharing it
// (used by objc_duplicateClass).
// Returns the newly installed extension.
// Locking: runtimeLock must be held by the caller.
class_rw_ext_t *
class_rw_t::extAlloc(const class_ro_t *ro, bool deepCopy)
{
    runtimeLock.assertLocked();

    auto rwe = objc::zalloc<class_rw_ext_t>();

    // Metaclasses historically use version 7, classes version 0.
    rwe->version = (ro->flags & RO_META) ? 7 : 0;

    method_list_t *list = ro->baseMethods();
    if (list) {
        if (deepCopy) list = list->duplicate();
        rwe->methods.attachLists(&list, 1);
    }

    // See comments in objc_duplicateClass
    // property lists and protocol lists historically
    // have not been deep-copied
    //
    // This is probably wrong and ought to be fixed some day
    property_list_t *proplist = ro->baseProperties;
    if (proplist) {
        rwe->properties.attachLists(&proplist, 1);
    }

    protocol_list_t *protolist = ro->baseProtocols;
    if (protolist) {
        rwe->protocols.attachLists(&protolist, 1);
    }

    set_ro_or_rwe(rwe, ro);
    return rwe;
}
// Attach method lists and properties and protocols from categories to a class.
// Assumes the categories in cats are all loaded and sorted by load order,
// oldest categories first.
static void
attachCategories(Class cls, const locstamped_category_t *cats_list, uint32_t cats_count,
                 int flags)
{
    if (slowpath(PrintReplacedMethods)) {
        printReplacements(cls, cats_list, cats_count);
    }
    if (slowpath(PrintConnecting)) {
        _objc_inform("CLASS: attaching %d categories to%s class '%s'%s",
                     cats_count, (flags & ATTACH_EXISTING) ? " existing" : "",
                     cls->nameForLogging(), (flags & ATTACH_METACLASS) ? " (meta)" : "");
    }

    /*
     * Only a few classes have more than 64 categories during launch.
     * This uses a little stack, and avoids malloc.
     *
     * Categories must be added in the proper order, which is back
     * to front. To do that with the chunking, we iterate cats_list
     * from front to back, build up the local buffers backwards,
     * and call attachLists on the chunks. attachLists prepends the
     * lists, so the final result is in the expected order.
     */
    constexpr uint32_t ATTACH_BUFSIZ = 64;
    method_list_t *mlists[ATTACH_BUFSIZ];
    property_list_t *proplists[ATTACH_BUFSIZ];
    protocol_list_t *protolists[ATTACH_BUFSIZ];

    uint32_t mcount = 0;
    uint32_t propcount = 0;
    uint32_t protocount = 0;
    bool fromBundle = NO;
    bool isMeta = (flags & ATTACH_METACLASS);
    auto rwe = cls->data()->extAllocIfNeeded();

    for (uint32_t i = 0; i < cats_count; i++) {
        auto& entry = cats_list[i];

        method_list_t *mlist = entry.cat->methodsForMeta(isMeta);
        if (mlist) {
            // Flush a full chunk before adding more. Each buffer is
            // filled from its END toward its start (see comment above).
            if (mcount == ATTACH_BUFSIZ) {
                prepareMethodLists(cls, mlists, mcount, NO, fromBundle, __func__);
                rwe->methods.attachLists(mlists, mcount);
                mcount = 0;
            }
            mlists[ATTACH_BUFSIZ - ++mcount] = mlist;
            fromBundle |= entry.hi->isBundle();
        }

        property_list_t *proplist =
            entry.cat->propertiesForMeta(isMeta, entry.hi);
        if (proplist) {
            if (propcount == ATTACH_BUFSIZ) {
                rwe->properties.attachLists(proplists, propcount);
                propcount = 0;
            }
            proplists[ATTACH_BUFSIZ - ++propcount] = proplist;
        }

        protocol_list_t *protolist = entry.cat->protocolsForMeta(isMeta);
        if (protolist) {
            if (protocount == ATTACH_BUFSIZ) {
                rwe->protocols.attachLists(protolists, protocount);
                protocount = 0;
            }
            protolists[ATTACH_BUFSIZ - ++protocount] = protolist;
        }
    }

    // Flush whatever remains in the partially-filled buffers.
    if (mcount > 0) {
        prepareMethodLists(cls, mlists + ATTACH_BUFSIZ - mcount, mcount,
                           NO, fromBundle, __func__);
        rwe->methods.attachLists(mlists + ATTACH_BUFSIZ - mcount, mcount);
        if (flags & ATTACH_EXISTING) {
            // New methods on an existing class can invalidate cached IMPs.
            flushCaches(cls, __func__, [](Class c){
                // constant caches have been dealt with in prepareMethodLists
                // if the class still is constant here, it's fine to keep
                return !c->cache.isConstantOptimizedCache();
            });
        }
    }

    rwe->properties.attachLists(proplists + ATTACH_BUFSIZ - propcount, propcount);

    rwe->protocols.attachLists(protolists + ATTACH_BUFSIZ - protocount, protocount);
}
/***********************************************************************
* methodizeClass
* Fixes up cls's method list, protocol list, and property list.
* Attaches any outstanding categories.
* `previously` is the old class pointer if cls was reallocated, else nil.
* Locking: runtimeLock must be held by the caller
**********************************************************************/
static void methodizeClass(Class cls, Class previously)
{
    runtimeLock.assertLocked();

    bool isMeta = cls->isMetaClass();
    auto rw = cls->data();
    auto ro = rw->ro();
    auto rwe = rw->ext();

    // Methodizing for the first time
    if (PrintConnecting) {
        _objc_inform("CLASS: methodizing class '%s' %s",
                     cls->nameForLogging(), isMeta ? "(meta)" : "");
    }

    // Install methods and properties that the class implements itself.
    method_list_t *list = ro->baseMethods();
    if (list) {
        prepareMethodLists(cls, &list, 1, YES, isBundleClass(cls), nullptr);
        // rwe exists only if something (e.g. a category) already forced it.
        if (rwe) rwe->methods.attachLists(&list, 1);
    }

    property_list_t *proplist = ro->baseProperties;
    if (rwe && proplist) {
        rwe->properties.attachLists(&proplist, 1);
    }

    protocol_list_t *protolist = ro->baseProtocols;
    if (rwe && protolist) {
        rwe->protocols.attachLists(&protolist, 1);
    }

    // Root classes get bonus method implementations if they don't have
    // them already. These apply before category replacements.
    if (cls->isRootMetaclass()) {
        // root metaclass
        addMethod(cls, @selector(initialize), (IMP)&objc_noop_imp, "", NO);
    }

    // Attach categories.
    if (previously) {
        if (isMeta) {
            objc::unattachedCategories.attachToClass(cls, previously,
                                                     ATTACH_METACLASS);
        } else {
            // When a class relocates, categories with class methods
            // may be registered on the class itself rather than on
            // the metaclass. Tell attachToClass to look for those.
            objc::unattachedCategories.attachToClass(cls, previously,
                                                     ATTACH_CLASS_AND_METACLASS);
        }
    }
    objc::unattachedCategories.attachToClass(cls, cls,
                                             isMeta ? ATTACH_METACLASS : ATTACH_CLASS);

#if DEBUG
    // Debug: sanity-check all SELs; log method list contents
    for (const auto& meth : rw->methods()) {
        if (PrintConnecting) {
            _objc_inform("METHOD %c[%s %s]", isMeta ? '+' : '-',
                         cls->nameForLogging(), sel_getName(meth.name()));
        }
        // Every selector must already be interned.
        ASSERT(sel_registerName(sel_getName(meth.name())) == meth.name());
    }
#endif
}
/***********************************************************************
* nonMetaClasses
* Returns the secondary metaclass => class map
* Used for some cases of +initialize and +resolveClassMethod:.
* This map does not contain all class and metaclass pairs. It only
* contains metaclasses whose classes would be in the runtime-allocated
* named-class table, but are not because some other class with the same name
* is in that table.
* Classes with no duplicates are not included.
* Classes in the preoptimized named-class table are not included.
* Classes whose duplicates are in the preoptimized table are not included.
* Most code should use getMaybeUnrealizedNonMetaClass()
* instead of reading this table.
* Locking: runtimeLock must be read- or write-locked by the caller
**********************************************************************/
static NXMapTable *nonmeta_class_map = nil;
static NXMapTable *nonMetaClasses(void)
{
    runtimeLock.assertLocked();

    if (nonmeta_class_map) return nonmeta_class_map;

    // nonmeta_class_map is typically small
    // Lazily create the table; INIT_ONCE_PTR handles racing initializers
    // by freeing the losing table.
    INIT_ONCE_PTR(nonmeta_class_map,
                  NXCreateMapTable(NXPtrValueMapPrototype, 32),
                  NXFreeMapTable(v));

    return nonmeta_class_map;
}
/***********************************************************************
* addNonMetaClass
* Adds metacls => cls to the secondary metaclass map
* Asserts that cls is a non-meta class with a metaclass isa, and that
* no entry previously existed for this metaclass.
* Locking: runtimeLock must be held by the caller
**********************************************************************/
static void addNonMetaClass(Class cls)
{
    runtimeLock.assertLocked();

    void *old;
    old = NXMapInsert(nonMetaClasses(), cls->ISA(), cls);

    ASSERT(!cls->isMetaClassMaybeUnrealized());
    ASSERT(cls->ISA()->isMetaClassMaybeUnrealized());
    ASSERT(!old);
}
// Removes cls's metaclass => class entry from the secondary map.
// Locking: runtimeLock must be held by the caller.
static void removeNonMetaClass(Class cls)
{
    runtimeLock.assertLocked();
    NXMapRemove(nonMetaClasses(), cls->ISA());
}
// Parse one length-prefixed field of a Swift v1 mangled name
// ("<decimal length><length bytes of content>").
// On success: `field` points at the content, `length` is its byte count,
// and `string` is advanced past the whole field. Returns false on a
// leading zero, a missing count, overflow, or content running past `end`.
static bool scanMangledField(const char *&string, const char *end,
                             const char *&field, int& length)
{
    // The Swift v1 mangler never emits a leading zero.
    if (*string == '0') return false;

    length = 0;
    field = string;
    while (field < end && isdigit(*field)) {
        int digit = *field++ - '0';
        // Accumulate the decimal count, rejecting signed overflow.
        if (__builtin_smul_overflow(length, 10, &length)) return false;
        if (__builtin_sadd_overflow(length, digit, &length)) return false;
    }

    // `field` now points at the content; skip the cursor past it.
    string = field + length;
    return length > 0 && string <= end;
}
/***********************************************************************
* copySwiftV1DemangledName
* Returns the pretty form ("Module.Name") of the given Swift-v1-mangled
* class or protocol name ("_TtC<module><name>" / "_TtP<module><name>_").
* Returns nil if the string doesn't look like a mangled Swift v1 name.
* The result must be freed with free().
**********************************************************************/
static char *copySwiftV1DemangledName(const char *string, bool isProtocol = false)
{
    if (!string) return nil;

    // Swift mangling prefix.
    if (strncmp(string, isProtocol ? "_TtP" : "_TtC", 4) != 0) return nil;
    string += 4;

    const char *end = string + strlen(string);

    // Module name.
    const char *prefix;
    int prefixLength;
    if (string[0] == 's') {
        // "s" is the Swift module.
        prefix = "Swift";
        prefixLength = 5;
        string += 1;
    } else {
        // scanMangledField advances `string` past the module field.
        if (! scanMangledField(string, end, prefix, prefixLength)) return nil;
    }

    // Class or protocol name.
    const char *suffix;
    int suffixLength;
    if (! scanMangledField(string, end, suffix, suffixLength)) return nil;

    if (isProtocol) {
        // Remainder must be "_".
        if (strcmp(string, "_") != 0) return nil;
    } else {
        // Remainder must be empty.
        if (string != end) return nil;
    }

    char *result;
    asprintf(&result, "%.*s.%.*s", prefixLength,prefix, suffixLength,suffix);
    return result;
}
/***********************************************************************
* copySwiftV1MangledName
* Returns the Swift 1.0 mangled form of the given class or protocol name.
* The input must look like "Module.Entity": exactly one '.', with
* non-empty text on both sides. Returns nil otherwise.
* The result must be freed with free().
**********************************************************************/
static char *copySwiftV1MangledName(const char *string, bool isProtocol = false)
{
    if (!string) return nullptr;

    size_t stringLength = strlen(string);

    // Exactly one dot, neither leading nor trailing.
    const char *dot = strchr(string, '.');
    if (!dot) return nullptr;
    if (strrchr(string, '.') != dot) return nullptr;
    size_t dotIndex = (size_t)(dot - string);
    if (dotIndex == 0 || dotIndex >= stringLength - 1) return nullptr;

    const char *moduleName = string;
    size_t moduleLength = dotIndex;
    const char *entityName = dot + 1;
    size_t entityLength = stringLength - (dotIndex + 1);

    char *name;
    if (moduleLength == 5 && memcmp(moduleName, "Swift", 5) == 0) {
        // The Swift standard library module mangles to the shorthand "s".
        asprintf(&name, "_Tt%cs%zu%.*s%s",
                 isProtocol ? 'P' : 'C',
                 entityLength, (int)entityLength, entityName,
                 isProtocol ? "_" : "");
    } else {
        asprintf(&name, "_Tt%c%zu%.*s%zu%.*s%s",
                 isProtocol ? 'P' : 'C',
                 moduleLength, (int)moduleLength, moduleName,
                 entityLength, (int)entityLength, entityName,
                 isProtocol ? "_" : "");
    }
    return name;
}
/***********************************************************************
* getClassExceptSomeSwift
* Looks up a class by name. The class MIGHT NOT be realized.
* Demangled Swift names are recognized.
* Classes known to the Swift runtime but not yet used are NOT recognized.
* (such as subclasses of un-instantiated generics)
* Use look_up_class() to find them as well.
* Locking: runtimeLock must be read- or write-locked by the caller.
**********************************************************************/

// This is a misnomer: gdb_objc_realized_classes is actually a list of
// named classes not in the dyld shared cache, whether realized or not.
// This list excludes lazily named classes, which have to be looked up
// using a getClass hook.
NXMapTable *gdb_objc_realized_classes;  // exported for debuggers in objc-gdb.h
uintptr_t objc_debug_realized_class_generation_count;

// Look up a class by its exact (mangled) name.
// Checks the runtime-allocated table first, then the shared cache.
// Locking: runtimeLock must be read- or write-locked by the caller.
static Class getClass_impl(const char *name)
{
    runtimeLock.assertLocked();

    // allocated in _read_images
    ASSERT(gdb_objc_realized_classes);

    // Try runtime-allocated table
    Class result = (Class)NXMapGet(gdb_objc_realized_classes, name);
    if (result) return result;

    // Try table from dyld shared cache.
    // Note we do this last to handle the case where we dlopen'ed a shared cache
    // dylib with duplicates of classes already present in the main executable.
    // In that case, we put the class from the main executable in
    // gdb_objc_realized_classes and want to check that before considering any
    // newly loaded shared cache binaries.
    return getPreoptimizedClass(name);
}
// Look up a class by name, also accepting the demangled ("Module.Name")
// form of Swift v1 class names. See the header comment above getClass_impl.
// Locking: runtimeLock must be read- or write-locked by the caller.
static Class getClassExceptSomeSwift(const char *name)
{
    runtimeLock.assertLocked();

    // Try name as-is
    Class result = getClass_impl(name);
    if (result) return result;

    // Try Swift-mangled equivalent of the given name.
    if (char *swName = copySwiftV1MangledName(name)) {
        result = getClass_impl(swName);
        free(swName);
        return result;
    }

    return nil;
}
/***********************************************************************
* addNamedClass
* Adds name => cls to the named non-meta class map.
* Warns about duplicate class names and keeps the old mapping.
* `replacing` is an old class that cls is intentionally replacing;
* a collision with it is not treated as a duplicate.
* Locking: runtimeLock must be held by the caller
**********************************************************************/
static void addNamedClass(Class cls, const char *name, Class replacing = nil)
{
    runtimeLock.assertLocked();
    Class old;
    if ((old = getClassExceptSomeSwift(name)) && old != replacing) {
        inform_duplicate(name, old, cls);

        // getMaybeUnrealizedNonMetaClass uses name lookups.
        // Classes not found by name lookup must be in the
        // secondary meta->nonmeta table.
        addNonMetaClass(cls);
    } else {
        NXMapInsert(gdb_objc_realized_classes, name, cls);
    }
    ASSERT(!(cls->data()->flags & RO_META));

    // wrong: constructed classes are already realized when they get here
    // ASSERT(!cls->isRealized());
}
/***********************************************************************
* removeNamedClass
* Removes cls from the name => cls map.
* Locking: runtimeLock must be held by the caller
**********************************************************************/
static void removeNamedClass(Class cls, const char *name)
{
    runtimeLock.assertLocked();
    ASSERT(!(cls->data()->flags & RO_META));
    if (cls == NXMapGet(gdb_objc_realized_classes, name)) {
        NXMapRemove(gdb_objc_realized_classes, name);
    } else {
        // cls has a name collision with another class - don't remove the other
        // but do remove cls from the secondary metaclass->class map.
        removeNonMetaClass(cls);
    }
}
/***********************************************************************
* futureNamedClasses
* Returns the classname => future class map for unrealized future classes.
* Lazily creates the table on first use.
* Locking: runtimeLock must be held by the caller
**********************************************************************/
static NXMapTable *future_named_class_map = nil;
static NXMapTable *futureNamedClasses()
{
    runtimeLock.assertLocked();

    if (future_named_class_map) return future_named_class_map;

    // future_named_class_map is big enough for CF's classes and a few others
    future_named_class_map =
        NXCreateMapTable(NXStrValueMapPrototype, 32);

    return future_named_class_map;
}
// True if any unrealized future classes are currently registered.
static bool haveFutureNamedClasses() {
    if (!future_named_class_map) return false;
    return NXCountMapTable(future_named_class_map) != 0;
}
/***********************************************************************
* addFutureNamedClass
* Installs cls as the class structure to use for the named class if it appears.
* Gives cls a minimal rw/ro pair flagged RO_FUTURE so later realization
* can recognize and adopt it.
* Locking: runtimeLock must be held by the caller
**********************************************************************/
static void addFutureNamedClass(const char *name, Class cls)
{
    void *old;

    runtimeLock.assertLocked();

    if (PrintFuture) {
        _objc_inform("FUTURE: reserving %p for %s", (void*)cls, name);
    }

    class_rw_t *rw = objc::zalloc<class_rw_t>();
    class_ro_t *ro = (class_ro_t *)calloc(sizeof(class_ro_t), 1);
    ro->name.store(strdupIfMutable(name), std::memory_order_relaxed);
    rw->set_ro(ro);
    cls->setData(rw);
    cls->data()->flags = RO_FUTURE;

    // A future class must not be registered twice under the same name.
    old = NXMapKeyCopyingInsert(futureNamedClasses(), name, cls);
    ASSERT(!old);
}
/***********************************************************************
* popFutureNamedClass
* Removes the named class from the unrealized future class list,
* because it has been realized.
* Returns nil if the name is not used by a future class.
* Frees the whole table once it becomes empty.
* Locking: runtimeLock must be held by the caller
**********************************************************************/
static Class popFutureNamedClass(const char *name)
{
    runtimeLock.assertLocked();

    Class cls = nil;

    if (future_named_class_map) {
        cls = (Class)NXMapKeyFreeingRemove(future_named_class_map, name);
        if (cls && NXCountMapTable(future_named_class_map) == 0) {
            NXFreeMapTable(future_named_class_map);
            future_named_class_map = nil;
        }
    }

    return cls;
}
/***********************************************************************
* remappedClasses
* Returns the oldClass => newClass map for realized future classes.
* Returns the oldClass => nil map for ignored weak-linked classes.
* Returns nil (without creating the table) when create is false and the
* table has never been needed.
* Locking: runtimeLock must be read- or write-locked by the caller
**********************************************************************/
static objc::DenseMap<Class, Class> *remappedClasses(bool create)
{
    static objc::LazyInitDenseMap<Class, Class> remapped_class_map;

    runtimeLock.assertLocked();

    // start big enough to hold CF's classes and a few others
    return remapped_class_map.get(create, 32);
}
/***********************************************************************
* noClassesRemapped
* Returns YES if no classes have been remapped
* (i.e. the remap table was never created).
* Locking: runtimeLock must be read- or write-locked by the caller
**********************************************************************/
static bool noClassesRemapped(void)
{
    runtimeLock.assertLocked();

    bool result = (remappedClasses(NO) == nil);
#if DEBUG
    // Catch construction of an empty table, which defeats optimization.
    auto *map = remappedClasses(NO);
    if (map) ASSERT(map->size() > 0);
#endif
    return result;
}
/***********************************************************************
* addRemappedClass
* newcls is a realized future class, replacing oldcls.
* OR newcls is nil, replacing ignored weak-linked class oldcls.
* Locking: runtimeLock must be write-locked by the caller
**********************************************************************/
static void addRemappedClass(Class oldcls, Class newcls)
{
    runtimeLock.assertLocked();

    if (PrintFuture) {
        _objc_inform("FUTURE: using %p instead of %p for %s",
                     (void*)newcls, (void*)oldcls, oldcls->nameForLogging());
    }

    auto result = remappedClasses(YES)->insert({ oldcls, newcls });
#if DEBUG
    if (!std::get<1>(result)) {
        // An existing mapping was overwritten. This is not allowed
        // unless it was to nil.
        auto iterator = std::get<0>(result);
        auto value = std::get<1>(*iterator);
        ASSERT(value == nil);
    }
#else
    (void)result;
#endif
}
/***********************************************************************
* remapClass
* Returns the live class pointer for cls, which may be pointing to
* a class struct that has been reallocated.
* Returns nil if cls is ignored because of weak linking.
* Locking: runtimeLock must be read- or write-locked by the caller
**********************************************************************/
static Class remapClass(Class cls)
{
    runtimeLock.assertLocked();

    if (!cls) return nil;

    // If the remap table was never created, nothing was ever remapped.
    if (auto *map = remappedClasses(NO)) {
        auto iterator = map->find(cls);
        if (iterator != map->end()) {
            return std::get<1>(*iterator);
        }
    }
    return cls;
}
// Convenience overload: remap a raw classref_t from an image's class list.
static Class remapClass(classref_t cls)
{
    return remapClass((Class)cls);
}
// Public wrapper around remapClass().
// Locking: acquires runtimeLock.
Class _class_remap(Class cls)
{
    mutex_locker_t lock(runtimeLock);
    return remapClass(cls);
}
/***********************************************************************
* remapClassRef
* Fix up a class ref, in case the class referenced has been reallocated
* or is an ignored weak-linked class.
* Locking: runtimeLock must be read- or write-locked by the caller
**********************************************************************/
static void remapClassRef(Class *clsref)
{
    runtimeLock.assertLocked();

    Class remapped = remapClass(*clsref);
    // Only write back when the value actually changed.
    if (remapped != *clsref) {
        *clsref = remapped;
    }
}
// Resolve a (possibly stub) Swift class reference.
// The stored value's low bit distinguishes the two cases: clear means it is
// already a real class pointer; set means it is a tagged stub_class_t whose
// initializer produces the real class. The resolved class is cached back
// into the ref. Loads/stores are relaxed atomics; no lock is taken here.
_Nullable Class
objc_loadClassref(_Nullable Class * _Nonnull clsref)
{
    auto *atomicClsref = explicit_atomic<uintptr_t>::from_pointer((uintptr_t *)clsref);

    uintptr_t cls = atomicClsref->load(std::memory_order_relaxed);
    // Fast path: low bit clear, already a real class pointer.
    if (fastpath((cls & 1) == 0))
        return (Class)cls;

    // Slow path: strip the tag bit, run the stub's initializer, and
    // cache the initialized class back into the reference.
    auto stub = (stub_class_t *)(cls & ~1ULL);
    Class initialized = stub->initializer((Class)stub, nil);
    atomicClsref->store((uintptr_t)initialized, std::memory_order_relaxed);
    return initialized;
}
/***********************************************************************
* getMaybeUnrealizedNonMetaClass
* Return the ordinary class for this class or metaclass.
* `inst` is an instance of `cls` or a subclass thereof, or nil.
* Non-nil inst is faster.
* The result may be unrealized.
* Used by +initialize.
* Tries, in order: the root-metaclass special case, inst's superclass
* chain, the ro's nonmetaclass pointer, name lookup, the secondary
* meta->nonmeta table, the dyld closure table, and shared cache duplicates.
* Locking: runtimeLock must be read- or write-locked by the caller
**********************************************************************/
static Class getMaybeUnrealizedNonMetaClass(Class metacls, id inst)
{
    // Hit counters for the statistics logged under PrintInitializing.
    static int total, named, secondary, sharedcache, dyld3;
    runtimeLock.assertLocked();
    ASSERT(metacls->isRealized());

    total++;

    // return cls itself if it's already a non-meta class
    if (!metacls->isMetaClass()) return metacls;

    // metacls really is a metaclass
    // which means inst (if any) is a class

    // special case for root metaclass
    // where inst == inst->ISA() == metacls is possible
    if (metacls->ISA() == metacls) {
        Class cls = metacls->getSuperclass();
        ASSERT(cls->isRealized());
        ASSERT(!cls->isMetaClass());
        ASSERT(cls->ISA() == metacls);
        if (cls->ISA() == metacls) return cls;
    }

    // use inst if available
    if (inst) {
        Class cls = remapClass((Class)inst);
        // cls may be a subclass - find the real class for metacls
        // fixme this probably stops working once Swift starts
        // reallocating classes if cls is unrealized.
        while (cls) {
            if (cls->ISA() == metacls) {
                ASSERT(!cls->isMetaClassMaybeUnrealized());
                return cls;
            }
            cls = cls->getSuperclass();
        }
#if DEBUG
        _objc_fatal("cls is not an instance of metacls");
#else
        // release build: be forgiving and fall through to slow lookups
#endif
    }

    // See if the metaclass has a pointer to its nonmetaclass.
    if (Class cls = metacls->bits.safe_ro()->getNonMetaclass())
        return cls;

    // try name lookup
    {
        Class cls = getClassExceptSomeSwift(metacls->mangledName());
        if (cls && cls->ISA() == metacls) {
            named++;
            if (PrintInitializing) {
                _objc_inform("INITIALIZE: %d/%d (%g%%) "
                             "successful by-name metaclass lookups",
                             named, total, named*100.0/total);
            }
            return cls;
        }
    }

    // try secondary table
    {
        Class cls = (Class)NXMapGet(nonMetaClasses(), metacls);
        if (cls) {
            secondary++;
            if (PrintInitializing) {
                _objc_inform("INITIALIZE: %d/%d (%g%%) "
                             "successful secondary metaclass lookups",
                             secondary, total, secondary*100.0/total);
            }
            ASSERT(cls->ISA() == metacls);
            return cls;
        }
    }

    // try the dyld closure table
    if (isPreoptimized())
    {
        // Try table from dyld closure first. It was built to ignore the dupes it
        // knows will come from the cache, so anything left in here was there when
        // we launched
        Class cls = nil;
        // Note, we have to pass the lambda directly here as otherwise we would try
        // message copy and autorelease.
        _dyld_for_each_objc_class(metacls->mangledName(),
                                  [&cls, metacls](void* classPtr, bool isLoaded, bool* stop) {
            // Skip images which aren't loaded.  This supports the case where dyld
            // might soft link an image from the main binary so its possibly not
            // loaded yet.
            if (!isLoaded)
                return;

            // Found a loaded image with this class name, so check if its the right one
            Class result = (Class)classPtr;
            if (result->ISA() == metacls) {
                cls = result;
                *stop = true;
            }
        });
        if (cls) {
            dyld3++;
            if (PrintInitializing) {
                _objc_inform("INITIALIZE: %d/%d (%g%%) "
                             "successful dyld closure metaclass lookups",
                             dyld3, total, dyld3*100.0/total);
            }
            return cls;
        }
    }

    // try any duplicates in the dyld shared cache
    {
        Class cls = nil;

        int count;
        Class *classes = copyPreoptimizedClasses(metacls->mangledName(), &count);
        if (classes) {
            for (int i = 0; i < count; i++) {
                if (classes[i]->ISA() == metacls) {
                    cls = classes[i];
                    break;
                }
            }
            free(classes);
        }

        if (cls) {
            sharedcache++;
            if (PrintInitializing) {
                _objc_inform("INITIALIZE: %d/%d (%g%%) "
                             "successful shared cache metaclass lookups",
                             sharedcache, total, sharedcache*100.0/total);
            }
            return cls;
        }
    }

    // Every strategy failed: this is an unrecoverable runtime state.
    _objc_fatal("no class for metaclass %p", (void*)metacls);
}
/***********************************************************************
* class_initialize. Send the '+initialize' message on demand to any
* uninitialized class. Force initialization of superclasses first.
* inst is an instance of cls, or nil. Non-nil is better for performance.
* Returns the class pointer. If the class was unrealized then
* it may be reallocated.
* Locking:
* runtimeLock must be held by the caller
* This function may drop the lock.
* On exit the lock is re-acquired or dropped as requested by leaveLocked.
**********************************************************************/
static Class initializeAndMaybeRelock(Class cls, id inst,
                                      mutex_t& lock, bool leaveLocked)
{
    lock.assertLocked();
    ASSERT(cls->isRealized());

    if (cls->isInitialized()) {
        // Already done; only adjust the lock state as requested.
        if (!leaveLocked) lock.unlock();
        return cls;
    }

    // Find the non-meta class for cls, if it is not already one.
    // The +initialize message is sent to the non-meta class object.
    Class nonmeta = getMaybeUnrealizedNonMetaClass(cls, inst);

    // Realize the non-meta class if necessary.
    if (nonmeta->isRealized()) {
        // nonmeta is cls, which was already realized
        // OR nonmeta is distinct, but is already realized
        // - nothing else to do
        lock.unlock();
    } else {
        // realizeClassMaybeSwiftAndUnlock drops the lock for us.
        nonmeta = realizeClassMaybeSwiftAndUnlock(nonmeta, lock);
        // runtimeLock is now unlocked
        // fixme Swift can't relocate the class today,
        // but someday it will:
        cls = object_getClass(nonmeta);
    }

    // runtimeLock is now unlocked, for +initialize dispatch
    ASSERT(nonmeta->isRealized());
    initializeNonMetaClass(nonmeta);

    // Restore the lock if the caller asked to keep it held.
    if (leaveLocked) runtimeLock.lock();
    return cls;
}
// Ensure cls is +initialized, sending +initialize if needed.
// Returns the (possibly reallocated) class pointer.
// Locking: acquires runtimeLock and releases it before returning.
Class class_initialize(Class cls, id obj)
{
    runtimeLock.lock();
    return initializeAndMaybeRelock(cls, obj, runtimeLock, false);
}
// Locking: caller must hold runtimeLock; this may drop and re-acquire it
// Variant of class_initialize() for callers that already hold the lock
// and need it held again on return (leaveLocked == true).
static Class initializeAndLeaveLocked(Class cls, id obj, mutex_t& lock)
{
    return initializeAndMaybeRelock(cls, obj, lock, true);
}
/***********************************************************************
* addRootClass
* Adds cls as a new realized root class.
* Locking: runtimeLock must be held by the caller.
**********************************************************************/
static void addRootClass(Class cls)
{
    runtimeLock.assertLocked();

    ASSERT(cls->isRealized());

    // Bump the generation counter so external tools can detect that the
    // realized-class graph changed.
    objc_debug_realized_class_generation_count++;

    // Push cls onto the front of the realized-root-class list,
    // which is singly linked through nextSiblingClass.
    cls->data()->nextSiblingClass = _firstRealizedClass;
    _firstRealizedClass = cls;
}
/***********************************************************************
* removeRootClass
* Unlinks cls from the list of realized root classes.
* cls must be present in the list.
* Locking: runtimeLock must be held by the caller.
**********************************************************************/
static void removeRootClass(Class cls)
{
    runtimeLock.assertLocked();

    objc_debug_realized_class_generation_count++;

    // Walk the sibling chain until we reach the link that points at cls,
    // then splice cls out of the list.
    Class *link = &_firstRealizedClass;
    while (*link != cls) {
        link = &(*link)->data()->nextSiblingClass;
    }
    *link = (*link)->data()->nextSiblingClass;
}
/***********************************************************************
* addSubclass
* Adds subcls as a subclass of supercls.
* Also propagates inherited per-class flags (C++ ctor/dtor, raw-isa,
* preoptimized-cache restrictions) from supercls down to subcls.
* Locking: runtimeLock must be held by the caller.
**********************************************************************/
static void addSubclass(Class supercls, Class subcls)
{
    runtimeLock.assertLocked();

    if (supercls && subcls) {
        ASSERT(supercls->isRealized());
        ASSERT(subcls->isRealized());

        objc_debug_realized_class_generation_count++;

        // Link subcls into supercls's subclass list (singly linked
        // through nextSiblingClass).
        subcls->data()->nextSiblingClass = supercls->data()->firstSubclass;
        supercls->data()->firstSubclass = subcls;

        // C++ construction/destruction requirements are inherited.
        if (supercls->hasCxxCtor()) {
            subcls->setHasCxxCtor();
        }

        if (supercls->hasCxxDtor()) {
            subcls->setHasCxxDtor();
        }

        // Re-scan for custom +alloc/+allocWithZone:, retain/release, and
        // core-method overrides now that the hierarchy changed.
        objc::AWZScanner::scanAddedSubClass(subcls, supercls);
        objc::RRScanner::scanAddedSubClass(subcls, supercls);
        objc::CoreScanner::scanAddedSubClass(subcls, supercls);

        // Preopt-cache restrictions flow down the hierarchy.
        if (!supercls->allowsPreoptCaches()) {
            subcls->setDisallowPreoptCachesRecursively(__func__);
        } else if (!supercls->allowsPreoptInlinedSels()) {
            subcls->setDisallowPreoptInlinedSelsRecursively(__func__);
        }

        // Special case: instancesRequireRawIsa does not propagate
        // from root class to root metaclass
        if (supercls->instancesRequireRawIsa() && supercls->getSuperclass()) {
            subcls->setInstancesRequireRawIsaRecursively(true);
        }
    }
}
/***********************************************************************
* removeSubclass
* Removes subcls from supercls's subclass list.
* subcls must currently be a direct subclass of supercls.
* Locking: runtimeLock must be held by the caller.
**********************************************************************/
static void removeSubclass(Class supercls, Class subcls)
{
    runtimeLock.assertLocked();
    ASSERT(supercls->isRealized());
    ASSERT(subcls->isRealized());
    ASSERT(subcls->getSuperclass() == supercls);

    objc_debug_realized_class_generation_count++;

    // Locate the link in supercls's subclass chain that points at subcls
    // and splice subcls out.
    Class *link = &supercls->data()->firstSubclass;
    while (*link && *link != subcls) {
        link = &(*link)->data()->nextSiblingClass;
    }
    ASSERT(*link == subcls);
    *link = subcls->data()->nextSiblingClass;
}
/***********************************************************************
* protocols
* Returns the protocol name => protocol map for protocols.
* The table is created lazily on first use and lives forever;
* INIT_ONCE_PTR handles racing initializers (the loser is freed).
* Locking: runtimeLock must be read- or write-locked by the caller
**********************************************************************/
static NXMapTable *protocols(void)
{
    static NXMapTable *protocol_map = nil;

    runtimeLock.assertLocked();

    INIT_ONCE_PTR(protocol_map,
                  NXCreateMapTable(NXStrValueMapPrototype, 16),
                  NXFreeMapTable(v) );

    return protocol_map;
}
/***********************************************************************
* getProtocol
* Looks up a protocol by name. Demangled Swift names are recognized.
* Lookup order: runtime-registered table, dyld preoptimized tables,
* then both again under the Swift-mangled equivalent of the name.
* Returns nil if no protocol with that name is known.
* Locking: runtimeLock must be read- or write-locked by the caller.
**********************************************************************/
static NEVER_INLINE Protocol *getProtocol(const char *name)
{
    runtimeLock.assertLocked();

    // Try name as-is.
    Protocol *result = (Protocol *)NXMapGet(protocols(), name);
    if (result) return result;

    // Try table from dyld3 closure and dyld shared cache
    result = getPreoptimizedProtocol(name);
    if (result) return result;

    // Try Swift-mangled equivalent of the given name.
    if (char *swName = copySwiftV1MangledName(name, true/*isProtocol*/)) {
        result = (Protocol *)NXMapGet(protocols(), swName);

        // Try table from dyld3 closure and dyld shared cache
        if (!result)
            result = getPreoptimizedProtocol(swName);

        free(swName);  // mangled name was heap-allocated by the copy
        return result;
    }

    return nullptr;
}
/***********************************************************************
* remapProtocol
* Returns the live protocol pointer for proto, which may be pointing to
* a protocol struct that has been reallocated.
* Falls back to proto itself if no replacement is registered.
* Locking: runtimeLock must be read- or write-locked by the caller
**********************************************************************/
static ALWAYS_INLINE protocol_t *remapProtocol(protocol_ref_t proto)
{
    runtimeLock.assertLocked();

    // Protocols in shared cache images have a canonical bit to mark that they
    // are the definition we should use
    if (((protocol_t *)proto)->isCanonical())
        return (protocol_t *)proto;

    // Otherwise look up the canonical definition by mangled name.
    protocol_t *newproto = (protocol_t *)
        getProtocol(((protocol_t *)proto)->mangledName);
    return newproto ? newproto : (protocol_t *)proto;
}
/***********************************************************************
* remapProtocolRef
* Fix up a protocol ref, in case the protocol referenced has been reallocated.
* Locking: runtimeLock must be read- or write-locked by the caller
**********************************************************************/
// Diagnostic counter: number of protocol refs that needed fixing.
static size_t UnfixedProtocolReferences;
static void remapProtocolRef(protocol_t **protoref)
{
    runtimeLock.assertLocked();

    protocol_t *newproto = remapProtocol((protocol_ref_t)*protoref);
    if (*protoref != newproto) {
        // Rewrite the reference in place to point at the live protocol.
        *protoref = newproto;
        UnfixedProtocolReferences++;
    }
}
/***********************************************************************
* moveIvars
* Slides a class's ivars to accommodate the given superclass size.
* Ivars are NOT compacted to compensate for a superclass that shrunk.
* ro        - the class's ro data (must be writeable)
* superSize - the superclass's new instanceSize; must exceed
*             ro->instanceStart.
* Locking: runtimeLock must be held by the caller.
**********************************************************************/
static void moveIvars(class_ro_t *ro, uint32_t superSize)
{
    runtimeLock.assertLocked();

    uint32_t diff;

    ASSERT(superSize > ro->instanceStart);
    diff = superSize - ro->instanceStart;

    if (ro->ivars) {
        // Find maximum alignment in this class's ivars
        uint32_t maxAlignment = 1;
        for (const auto& ivar : *ro->ivars) {
            if (!ivar.offset) continue;  // anonymous bitfield

            uint32_t alignment = ivar.alignment();
            if (alignment > maxAlignment) maxAlignment = alignment;
        }

        // Compute a slide value that preserves that alignment
        // (round diff up to a multiple of maxAlignment).
        uint32_t alignMask = maxAlignment - 1;
        diff = (diff + alignMask) & ~alignMask;

        // Slide all of this class's ivars en masse
        for (const auto& ivar : *ro->ivars) {
            if (!ivar.offset) continue;  // anonymous bitfield

            uint32_t oldOffset = (uint32_t)*ivar.offset;
            uint32_t newOffset = oldOffset + diff;
            *ivar.offset = newOffset;

            if (PrintIvars) {
                _objc_inform("IVARS: offset %u -> %u for %s "
                             "(size %u, align %u)",
                             oldOffset, newOffset, ivar.name,
                             ivar.size, ivar.alignment());
            }
        }
    }

    // instanceStart/instanceSize are const in class_ro_t; the casts
    // deliberately write through anyway (ro is a writeable copy here).
    *(uint32_t *)&ro->instanceStart += diff;
    *(uint32_t *)&ro->instanceSize += diff;
}
/***********************************************************************
* reconcileInstanceVariables
* Non-fragile ivar support: if this class's superclass grew since this
* class was compiled, slide this class's ivars so instances do not
* overlap superclass storage. May replace the class's ro data with a
* writeable copy (updating the caller's ro reference).
* cls       - a realized non-meta class
* supercls  - its realized superclass (must be non-nil)
* ro        - in/out reference to cls's ro data
* Locking: runtimeLock must be held by the caller.
**********************************************************************/
static void reconcileInstanceVariables(Class cls, Class supercls, const class_ro_t*& ro)
{
    class_rw_t *rw = cls->data();

    ASSERT(supercls);
    ASSERT(!cls->isMetaClass());

    /* debug: print them all before sliding
    if (ro->ivars) {
        for (const auto& ivar : *ro->ivars) {
            if (!ivar.offset) continue;  // anonymous bitfield

            _objc_inform("IVARS: %s.%s (offset %u, size %u, align %u)",
                         ro->name, ivar.name,
                         *ivar.offset, ivar.size, ivar.alignment());
        }
    }
    */

    // Non-fragile ivars - reconcile this class with its superclass
    const class_ro_t *super_ro = supercls->data()->ro();

    if (DebugNonFragileIvars) {
        // Debugging: Force non-fragile ivars to slide.
        // Intended to find compiler, runtime, and program bugs.
        // If it fails with this and works without, you have a problem.

        // Operation: Reset everything to 0 + misalignment.
        // Then force the normal sliding logic to push everything back.

        // Exceptions: root classes, metaclasses, *NSCF* classes,
        // __CF* classes, NSConstantString, NSSimpleCString
        // (already know it's not root because supercls != nil)
        const char *clsname = cls->mangledName();
        if (!strstr(clsname, "NSCF") &&
            0 != strncmp(clsname, "__CF", 4) &&
            0 != strcmp(clsname, "NSConstantString") &&
            0 != strcmp(clsname, "NSSimpleCString"))
        {
            uint32_t oldStart = ro->instanceStart;
            class_ro_t *ro_w = make_ro_writeable(rw);
            ro = rw->ro();

            // Find max ivar alignment in class.
            // default to word size to simplify ivar update
            uint32_t alignment = 1<<WORD_SHIFT;
            if (ro->ivars) {
                for (const auto& ivar : *ro->ivars) {
                    if (ivar.alignment() > alignment) {
                        alignment = ivar.alignment();
                    }
                }
            }
            uint32_t misalignment = ro->instanceStart % alignment;
            uint32_t delta = ro->instanceStart - misalignment;
            ro_w->instanceStart = misalignment;
            ro_w->instanceSize -= delta;

            if (PrintIvars) {
                _objc_inform("IVARS: DEBUG: forcing ivars for class '%s' "
                             "to slide (instanceStart %zu -> %zu)",
                             cls->nameForLogging(), (size_t)oldStart,
                             (size_t)ro->instanceStart);
            }

            if (ro->ivars) {
                for (const auto& ivar : *ro->ivars) {
                    if (!ivar.offset) continue;  // anonymous bitfield
                    *ivar.offset -= delta;
                }
            }
        }
    }

    if (ro->instanceStart >= super_ro->instanceSize) {
        // Superclass has not overgrown its space. We're done here.
        return;
    }
    // fixme can optimize for "class has no new ivars", etc

    // The early return above guarantees instanceStart < instanceSize,
    // so the slide is unconditional here. (A redundant re-check of the
    // same condition was removed.)

    // Superclass has changed size. This class's ivars must move.
    // Also slide layout bits in parallel.
    // This code is incapable of compacting the subclass to
    // compensate for a superclass that shrunk, so don't do that.
    if (PrintIvars) {
        _objc_inform("IVARS: sliding ivars for class %s "
                     "(superclass was %u bytes, now %u)",
                     cls->nameForLogging(), ro->instanceStart,
                     super_ro->instanceSize);
    }
    class_ro_t *ro_w = make_ro_writeable(rw);
    ro = rw->ro();
    moveIvars(ro_w, super_ro->instanceSize);
    gdb_objc_class_changed(cls, OBJC_CLASS_IVARS_CHANGED, ro->getName());
}
// Sanity-check a class that claims to be realized: on macOS, verify that
// its rw data pointer actually points at a class_rw_t-sized malloc block,
// catching corrupted or stale data pointers early.
static void validateAlreadyRealizedClass(Class cls) {
    ASSERT(cls->isRealized());
#if TARGET_OS_OSX
    class_rw_t *rw = cls->data();
    size_t rwSize = malloc_size(rw);

    // Note: this check will need some adjustment if class_rw_t's
    // size changes to not match the malloc bucket.
    if (rwSize != sizeof(class_rw_t))
        _objc_fatal("realized class %p has corrupt data pointer %p", cls, rw);
#endif
}
/***********************************************************************
* realizeClassWithoutSwift
* Performs first-time initialization on class cls,
* including allocating its read-write data.
* Does not perform any Swift-side initialization.
* previously is the class's old address if it was relocated, else nil.
* Returns the real class structure for the class.
* Locking: runtimeLock must be write-locked by the caller
**********************************************************************/
static Class realizeClassWithoutSwift(Class cls, Class previously)
{
    runtimeLock.assertLocked();

    class_rw_t *rw;
    Class supercls;
    Class metacls;

    if (!cls) return nil;
    if (cls->isRealized()) {
        // Already realized: just validate and return.
        validateAlreadyRealizedClass(cls);
        return cls;
    }
    ASSERT(cls == remapClass(cls));

    // fixme verify class is not in an un-dlopened part of the shared cache?

    // Before realization, cls->data() points at the compiler-emitted ro.
    auto ro = (const class_ro_t *)cls->data();
    auto isMeta = ro->flags & RO_META;
    if (ro->flags & RO_FUTURE) {
        // This was a future class. rw data is already allocated.
        rw = cls->data();
        ro = cls->data()->ro();
        ASSERT(!isMeta);
        cls->changeInfo(RW_REALIZED|RW_REALIZING, RW_FUTURE);
    } else {
        // Normal class. Allocate writeable class data.
        rw = objc::zalloc<class_rw_t>();
        rw->set_ro(ro);
        rw->flags = RW_REALIZED|RW_REALIZING|isMeta;
        cls->setData(rw);
    }

    cls->cache.initializeToEmptyOrPreoptimizedInDisguise();

#if FAST_CACHE_META
    if (isMeta) cls->cache.setBit(FAST_CACHE_META);
#endif

    // Choose an index for this class.
    // Sets cls->instancesRequireRawIsa if no more indexes are available.
    cls->chooseClassArrayIndex();

    if (PrintConnecting) {
        _objc_inform("CLASS: realizing class '%s'%s %p %p #%u %s%s",
                     cls->nameForLogging(), isMeta ? " (meta)" : "",
                     (void*)cls, ro, cls->classArrayIndex(),
                     cls->isSwiftStable() ? "(swift)" : "",
                     cls->isSwiftLegacy() ? "(pre-stable swift)" : "");
    }

    // Realize superclass and metaclass, if they aren't already.
    // This needs to be done after RW_REALIZED is set above, for root classes.
    // This needs to be done after class index is chosen, for root metaclasses.
    // This assumes that none of those classes have Swift contents,
    // or that Swift's initializers have already been called.
    // fixme that assumption will be wrong if we add support
    // for ObjC subclasses of Swift classes.
    supercls = realizeClassWithoutSwift(remapClass(cls->getSuperclass()), nil);
    metacls = realizeClassWithoutSwift(remapClass(cls->ISA()), nil);

#if SUPPORT_NONPOINTER_ISA
    if (isMeta) {
        // Metaclasses do not need any features from non pointer ISA
        // This allows for a fastpath for classes in objc_retain/objc_release.
        cls->setInstancesRequireRawIsa();
    } else {
        // Disable non-pointer isa for some classes and/or platforms.
        // Set instancesRequireRawIsa.
        bool instancesRequireRawIsa = cls->instancesRequireRawIsa();
        bool rawIsaIsInherited = false;
        static bool hackedDispatch = false;

        if (DisableNonpointerIsa) {
            // Non-pointer isa disabled by environment or app SDK version
            instancesRequireRawIsa = true;
        }
        else if (!hackedDispatch && 0 == strcmp(ro->getName(), "OS_object"))
        {
            // hack for libdispatch et al - isa also acts as vtable pointer
            hackedDispatch = true;
            instancesRequireRawIsa = true;
        }
        else if (supercls && supercls->getSuperclass() &&
                 supercls->instancesRequireRawIsa())
        {
            // This is also propagated by addSubclass()
            // but nonpointer isa setup needs it earlier.
            // Special case: instancesRequireRawIsa does not propagate
            // from root class to root metaclass
            instancesRequireRawIsa = true;
            rawIsaIsInherited = true;
        }

        if (instancesRequireRawIsa) {
            cls->setInstancesRequireRawIsaRecursively(rawIsaIsInherited);
        }
    }
// SUPPORT_NONPOINTER_ISA
#endif

    // Update superclass and metaclass in case of remapping
    cls->setSuperclass(supercls);
    cls->initClassIsa(metacls);

    // Reconcile instance variable offsets / layout.
    // This may reallocate class_ro_t, updating our ro variable.
    if (supercls && !isMeta) reconcileInstanceVariables(cls, supercls, ro);

    // Set fastInstanceSize if it wasn't set already.
    cls->setInstanceSize(ro->instanceSize);

    // Copy some flags from ro to rw
    if (ro->flags & RO_HAS_CXX_STRUCTORS) {
        cls->setHasCxxDtor();
        if (! (ro->flags & RO_HAS_CXX_DTOR_ONLY)) {
            cls->setHasCxxCtor();
        }
    }

    // Propagate the associated objects forbidden flag from ro or from
    // the superclass.
    if ((ro->flags & RO_FORBIDS_ASSOCIATED_OBJECTS) ||
        (supercls && supercls->forbidsAssociatedObjects()))
    {
        rw->flags |= RW_FORBIDS_ASSOCIATED_OBJECTS;
    }

    // Connect this class to its superclass's subclass lists
    if (supercls) {
        addSubclass(supercls, cls);
    } else {
        addRootClass(cls);
    }

    // Attach categories
    methodizeClass(cls, previously);

    return cls;
}
/***********************************************************************
* _objc_realizeClassFromSwift
* Called by Swift when it needs the ObjC part of a class to be realized.
* There are four cases:
* 1. cls != nil; previously == cls
* Class cls is being realized in place
* 2. cls != nil; previously == nil
* Class cls is being constructed at runtime
* 3. cls != nil; previously != cls
* The class that was at previously has been reallocated to cls
* 4. cls == nil, previously != nil
* The class at previously is hereby disavowed
*
* Only variants #1 and #2 are supported today.
* NOTE(review): despite that statement, the code below does contain a
* working branch for #3 (relocation) — confirm which comment is current.
*
* Locking: acquires runtimeLock
**********************************************************************/
Class _objc_realizeClassFromSwift(Class cls, void *previously)
{
    if (cls) {
        if (previously && previously != (void*)cls) {
            // #3: relocation
            // Register the old -> new mapping before realizing.
            mutex_locker_t lock(runtimeLock);
            addRemappedClass((Class)previously, cls);
            addClassTableEntry(cls);
            addNamedClass(cls, cls->mangledName(), /*replacing*/nil);
            return realizeClassWithoutSwift(cls, (Class)previously);
        } else {
            // #1 and #2: realization in place, or new class
            mutex_locker_t lock(runtimeLock);

            if (!previously) {
                // #2: new class
                cls = readClass(cls, false/*bundle*/, false/*shared cache*/);
            }

            // #1 and #2: realization in place, or new class
            // We ignore the Swift metadata initializer callback.
            // We assume that's all handled since we're being called from Swift.
            return realizeClassWithoutSwift(cls, nil);
        }
    }
    else {
        // #4: disavowal
        // In the future this will mean remapping the old address to nil
        // and if necessary removing the old address from any other tables.
        _objc_fatal("Swift requested that class %p be ignored, "
                    "but libobjc does not support that.", previously);
    }
}
/***********************************************************************
* realizeSwiftClass
* Performs first-time initialization on class cls,
* including allocating its read-write data,
* and any Swift-side initialization.
* Returns the real class structure for the class.
* Locking: acquires runtimeLock indirectly
**********************************************************************/
static Class realizeSwiftClass(Class cls)
{
    runtimeLock.assertUnlocked();

    // Some assumptions:
    // * Metaclasses never have a Swift initializer.
    // * Root classes never have a Swift initializer.
    // (These two together avoid initialization order problems at the root.)
    // * Unrealized non-Swift classes have no Swift ancestry.
    // * Unrealized Swift classes with no initializer have no ancestry that
    // does have the initializer.
    // (These two together mean we don't need to scan superclasses here
    // and we don't need to worry about Swift superclasses inside
    // realizeClassWithoutSwift()).
    // fixme some of these assumptions will be wrong
    // if we add support for ObjC subclasses of Swift classes.

#if DEBUG
    // Debug builds verify those assumptions under the lock.
    runtimeLock.lock();
    ASSERT(remapClass(cls) == cls);
    ASSERT(cls->isSwiftStable_ButAllowLegacyForNow());
    ASSERT(!cls->isMetaClassMaybeUnrealized());
    ASSERT(cls->getSuperclass());
    runtimeLock.unlock();
#endif

    // Look for a Swift metadata initialization function
    // installed on the class. If it is present we call it.
    // That function in turn initializes the Swift metadata,
    // prepares the "compiler-generated" ObjC metadata if not
    // already present, and calls _objc_realizeSwiftClass() to finish
    // our own initialization.
    if (auto init = cls->swiftMetadataInitializer()) {
        if (PrintConnecting) {
            _objc_inform("CLASS: calling Swift metadata initializer "
                         "for class '%s' (%p)", cls->nameForLogging(), cls);
        }

        Class newcls = init(cls, nil);

        // fixme someday Swift will need to relocate classes at this point,
        // but we don't accept that yet.
        if (cls != newcls) {
            mutex_locker_t lock(runtimeLock);
            addRemappedClass(cls, newcls);
        }

        return newcls;
    }
    else {
        // No Swift-side initialization callback.
        // Perform our own realization directly.
        mutex_locker_t lock(runtimeLock);
        return realizeClassWithoutSwift(cls, nil);
    }
}
/***********************************************************************
* realizeClassMaybeSwift (MaybeRelock / AndUnlock / AndLeaveLocked)
* Realize a class that might be a Swift class.
* Returns the real class structure for the class.
* Locking:
* runtimeLock must be held on entry
* runtimeLock may be dropped during execution
* ...AndUnlock function leaves runtimeLock unlocked on exit
* ...AndLeaveLocked re-acquires runtimeLock if it was dropped
* This complication avoids repeated lock transitions in some cases.
**********************************************************************/
static Class
realizeClassMaybeSwiftMaybeRelock(Class cls, mutex_t& lock, bool leaveLocked)
{
    lock.assertLocked();

    if (!cls->isSwiftStable_ButAllowLegacyForNow()) {
        // Non-Swift class. Realize it now with the lock still held.
        // fixme wrong in the future for objc subclasses of swift classes
        realizeClassWithoutSwift(cls, nil);
        if (!leaveLocked) lock.unlock();
    } else {
        // Swift class. We need to drop locks and call the Swift
        // runtime to initialize it.
        lock.unlock();
        cls = realizeSwiftClass(cls);
        ASSERT(cls->isRealized());  // callback must have provoked realization
        if (leaveLocked) lock.lock();
    }

    return cls;
}
// Realize cls (possibly Swift); runtimeLock is unlocked on return.
static Class
realizeClassMaybeSwiftAndUnlock(Class cls, mutex_t& lock)
{
    return realizeClassMaybeSwiftMaybeRelock(cls, lock, false);
}
// Realize cls (possibly Swift); the lock is held again on return,
// though it may have been dropped and re-acquired in between.
static Class
realizeClassMaybeSwiftAndLeaveLocked(Class cls, mutex_t& lock)
{
    return realizeClassMaybeSwiftMaybeRelock(cls, lock, true);
}
/***********************************************************************
* missingWeakSuperclass
* Return YES if some superclass of cls was weak-linked and is missing.
* cls must be unrealized. Walks up the (remapped) superclass chain,
* stopping at the first realized ancestor.
**********************************************************************/
static bool
missingWeakSuperclass(Class cls)
{
    ASSERT(!cls->isRealized());

    // Iterate instead of recursing: each step moves one level up the
    // superclass chain until we can give a definitive answer.
    Class current = cls;
    for (;;) {
        if (!current->getSuperclass()) {
            // superclass nil. This is normal for root classes only.
            return (!(current->data()->flags & RO_ROOT));
        }

        // superclass not nil. Check if a higher superclass is missing.
        Class supercls = remapClass(current->getSuperclass());
        ASSERT(current != current->getSuperclass());
        ASSERT(current != supercls);
        if (!supercls) return YES;   // weak-linked and absent
        if (supercls->isRealized()) return NO;  // realized => chain intact
        current = supercls;
    }
}
/***********************************************************************
* realizeAllClassesInImage
* Non-lazily realizes all unrealized classes in the given image.
* Idempotent: returns immediately if the image is already fully realized.
* Locking: runtimeLock must be held by the caller.
* Locking: this function may drop and re-acquire the lock.
**********************************************************************/
static void realizeAllClassesInImage(header_info *hi)
{
    runtimeLock.assertLocked();

    size_t count, i;
    classref_t const *classlist;

    if (hi->areAllClassesRealized()) return;

    classlist = _getObjc2ClassList(hi, &count);

    for (i = 0; i < count; i++) {
        Class cls = remapClass(classlist[i]);
        if (cls) {
            // Swift classes may force the lock to be dropped here.
            realizeClassMaybeSwiftAndLeaveLocked(cls, runtimeLock);
        }
    }

    hi->setAllClassesRealized(YES);
}
/***********************************************************************
* realizeAllClasses
* Non-lazily realizes all unrealized classes in all known images.
* Locking: runtimeLock must be held by the caller.
* Locking: this function may drop and re-acquire the lock.
* Dropping the lock makes this function thread-unsafe with respect
* to concurrent image unload, but the callers of this function
* already ultimately do something that is also thread-unsafe with
* respect to image unload (such as using the list of all classes).
**********************************************************************/
static void realizeAllClasses(void)
{
    runtimeLock.assertLocked();

    // Visit every registered image header in load order.
    for (header_info *hi = FirstHeader; hi; hi = hi->getNext()) {
        realizeAllClassesInImage(hi);  // may drop and re-acquire runtimeLock
    }
}
/***********************************************************************
* _objc_allocateFutureClass
* Allocate an unresolved future class for the given class name.
* Returns any existing allocation if one was already made.
* Assumes the named class doesn't exist yet.
* Locking: acquires runtimeLock
**********************************************************************/
Class _objc_allocateFutureClass(const char *name)
{
    mutex_locker_t lock(runtimeLock);

    NXMapTable *map = futureNamedClasses();

    // Reuse an existing future class allocation for this name, if any.
    Class existing = (Class)NXMapGet(map, name);
    if (existing) return existing;

    // Otherwise allocate a fresh zeroed class struct and register it
    // under the requested name.
    Class cls = _calloc_class(sizeof(objc_class));
    addFutureNamedClass(name, cls);

    return cls;
}
/***********************************************************************
* objc_getFutureClass. Return the id of the named class.
* If the class does not exist, return an uninitialized class
* structure that will be used for the class when and if it
* does get loaded.
* Not thread safe.
**********************************************************************/
Class objc_getFutureClass(const char *name)
{
    Class cls;

    // YES unconnected, NO class handler
    // (unconnected is OK because it will someday be the real class)
    cls = look_up_class(name, YES, NO);
    if (cls) {
        if (PrintFuture) {
            _objc_inform("FUTURE: found %p already in use for %s",
                         (void*)cls, name);
        }

        return cls;
    }

    // No class or future class with that name yet. Make one.
    // fixme not thread-safe with respect to
    // simultaneous library load or getFutureClass.
    return _objc_allocateFutureClass(name);
}
// Returns YES if cls is a not-yet-resolved future class. nil => NO.
BOOL _class_isFutureClass(Class cls)
{
    if (!cls) return NO;
    return cls->isFuture();
}
// Returns YES if cls is a stable-ABI Swift class. nil => NO.
BOOL _class_isSwift(Class _Nullable cls)
{
    if (!cls) return NO;
    return cls->isSwiftStable();
}
/***********************************************************************
* _objc_flush_caches
* Flushes all caches.
* (Historical behavior: flush caches for cls, its metaclass,
* and subclasses thereof. Nil flushes all classes.)
* Locking: acquires runtimeLock
**********************************************************************/
// Erase the method cache of every realized class in cls's subtree
// (or of every realized class and metaclass when cls is nil) for which
// predicate returns true.
static void flushCaches(Class cls, const char *func, bool (^predicate)(Class))
{
    runtimeLock.assertLocked();
#if CONFIG_USE_CACHE_LOCK
    mutex_locker_t lock(cacheUpdateLock);
#endif

    const auto handler = ^(Class c) {
        if (predicate(c)) {
            c->cache.eraseNolock(func);
        }

        return true;  // keep enumerating
    };

    if (cls) {
        foreach_realized_class_and_subclass(cls, handler);
    } else {
        foreach_realized_class_and_metaclass(handler);
    }
}
// Public cache-flush entry point. Flushes cls, its metaclass, and all
// subclasses thereof; nil flushes every class and also garbage-collects
// old cache allocations. Locking: acquires runtimeLock (and possibly
// cacheUpdateLock).
void _objc_flush_caches(Class cls)
{
    {
        mutex_locker_t lock(runtimeLock);
        // Constant optimized caches live in the shared cache and are
        // never erased, hence the predicate.
        flushCaches(cls, __func__, [](Class c){
            return !c->cache.isConstantOptimizedCache();
        });
        if (cls && !cls->isMetaClass() && !cls->isRootClass()) {
            // Also flush the metaclass side of the hierarchy.
            flushCaches(cls->ISA(), __func__, [](Class c){
                return !c->cache.isConstantOptimizedCache();
            });
        } else {
            // cls is a root class or root metaclass. Its metaclass is itself
            // or a subclass so the metaclass caches were already flushed.
        }
    }

    if (!cls) {
        // collectALot if cls==nil
#if CONFIG_USE_CACHE_LOCK
        mutex_locker_t lock(cacheUpdateLock);
#else
        mutex_locker_t lock(runtimeLock);
#endif
        cache_t::collectNolock(true);
    }
}
/***********************************************************************
* map_images
* Process the given images which are being mapped in by dyld.
* Calls ABI-agnostic code after taking ABI-specific locks.
*
* Locking: write-locks runtimeLock
**********************************************************************/
void
map_images(unsigned count, const char * const paths[],
           const struct mach_header * const mhdrs[])
{
    // Take the runtime lock here so the shared _nolock path can assume it.
    mutex_locker_t lock(runtimeLock);
    return map_images_nolock(count, paths, mhdrs);
}
// Register every category in image hi with its target class: attach
// immediately if the class (or metaclass) is realized, otherwise queue
// the category for attachment at realization time. Stub classes always
// queue on the stub itself. Locking: caller must hold runtimeLock.
static void load_categories_nolock(header_info *hi) {
    bool hasClassProperties = hi->info()->hasCategoryClassProperties();

    size_t count;
    auto processCatlist = [&](category_t * const *catlist) {
        for (unsigned i = 0; i < count; i++) {
            category_t *cat = catlist[i];
            Class cls = remapClass(cat->cls);
            locstamped_category_t lc{cat, hi};

            if (!cls) {
                // Category's target class is missing (probably weak-linked).
                // Ignore the category.
                if (PrintConnecting) {
                    _objc_inform("CLASS: IGNORING category \?\?\?(%s) %p with "
                                 "missing weak-linked target class",
                                 cat->name, cat);
                }
                continue;
            }

            // Process this category.
            if (cls->isStubClass()) {
                // Stub classes are never realized. Stub classes
                // don't know their metaclass until they're
                // initialized, so we have to add categories with
                // class methods or properties to the stub itself.
                // methodizeClass() will find them and add them to
                // the metaclass as appropriate.
                // (Fixed: cat->protocols was redundantly tested twice here.)
                if (cat->instanceMethods ||
                    cat->protocols ||
                    cat->instanceProperties ||
                    cat->classMethods ||
                    (hasClassProperties && cat->_classProperties))
                {
                    objc::unattachedCategories.addForClass(lc, cls);
                }
            } else {
                // First, register the category with its target class.
                // Then, rebuild the class's method lists (etc) if
                // the class is realized.
                if (cat->instanceMethods || cat->protocols
                    || cat->instanceProperties)
                {
                    if (cls->isRealized()) {
                        attachCategories(cls, &lc, 1, ATTACH_EXISTING);
                    } else {
                        objc::unattachedCategories.addForClass(lc, cls);
                    }
                }

                if (cat->classMethods || cat->protocols
                    || (hasClassProperties && cat->_classProperties))
                {
                    if (cls->ISA()->isRealized()) {
                        attachCategories(cls->ISA(), &lc, 1, ATTACH_EXISTING | ATTACH_METACLASS);
                    } else {
                        objc::unattachedCategories.addForClass(lc, cls->ISA());
                    }
                }
            }
        }
    };

    // Process both the primary and the secondary category lists.
    processCatlist(hi->catlist(&count));
    processCatlist(hi->catlist2(&count));
}
static void loadAllCategories() {
mutex_locker_t lock(runtimeLock);
for (auto *hi = FirstHeader; hi != NULL; hi = hi->getNext()) {
load_categories_nolock(hi);
}
}
/***********************************************************************
* load_images
* Process +load in the given images which are being mapped in by dyld.
*
* Locking: write-locks runtimeLock and loadMethodLock
**********************************************************************/
extern bool hasLoadMethods(const headerType *mhdr);
extern void prepare_load_methods(const headerType *mhdr);

void
load_images(const char *path __unused, const struct mach_header *mh)
{
    // One-time catch-up: attach all categories from images that were
    // mapped before dyld's notify registration completed.
    if (!didInitialAttachCategories && didCallDyldNotifyRegister) {
        didInitialAttachCategories = true;
        loadAllCategories();
    }

    // Return without taking locks if there are no +load methods here.
    if (!hasLoadMethods((const headerType *)mh)) return;

    recursive_mutex_locker_t lock(loadMethodLock);

    // Discover load methods
    {
        mutex_locker_t lock2(runtimeLock);
        prepare_load_methods((const headerType *)mh);
    }

    // Call +load methods (without runtimeLock - re-entrant)
    call_load_methods();
}
/***********************************************************************
* unmap_image
* Process the given image which is about to be unmapped by dyld.
*
* Locking: write-locks runtimeLock and loadMethodLock
**********************************************************************/
void
unmap_image(const char *path __unused, const struct mach_header *mh)
{
    // loadMethodLock first, matching load_images()'s lock ordering.
    recursive_mutex_locker_t lock(loadMethodLock);
    mutex_locker_t lock2(runtimeLock);
    unmap_image_nolock(mh);
}
/***********************************************************************
* mustReadClasses
* Preflight check in advance of readClass() from an image.
* Returns YES when readClass() must be run manually for hi's classes
* (i.e. the image's preoptimized class data cannot be trusted as-is).
**********************************************************************/
bool mustReadClasses(header_info *hi, bool hasDyldRoots)
{
    const char *reason;

    // If the image is not preoptimized then we must read classes.
    if (!hi->hasPreoptimizedClasses()) {
        reason = nil;  // Don't log this one because it is noisy.
        goto readthem;
    }

    // If iOS simulator then we must read classes.
    // (On simulator this makes the rest of the function unreachable.)
#if TARGET_OS_SIMULATOR
    reason = "the image is for iOS simulator";
    goto readthem;
#endif

    ASSERT(!hi->isBundle());  // no MH_BUNDLE in shared cache

    // If the image may have missing weak superclasses then we must read classes
    if (!noMissingWeakSuperclasses() || hasDyldRoots) {
        reason = "the image may contain classes with missing weak superclasses";
        goto readthem;
    }

    // If there are unresolved future classes then we must read classes.
    if (haveFutureNamedClasses()) {
        reason = "there are unresolved future classes pending";
        goto readthem;
    }

    // readClass() rewrites bits in backward-deploying Swift stable ABI code.
    // The assumption here is that there are no such classes
    // in the dyld shared cache.
#if DEBUG
    {
        size_t count;
        classref_t const *classlist = _getObjc2ClassList(hi, &count);
        for (size_t i = 0; i < count; i++) {
            Class cls = remapClass(classlist[i]);
            ASSERT(!cls->isUnfixedBackwardDeployingStableSwift());
        }
    }
#endif

    // readClass() does not need to do anything.
    return NO;

readthem:
    if (PrintPreopt && reason) {
        _objc_inform("PREOPTIMIZATION: reading classes manually from %s "
                     "because %s", hi->fname(), reason);
    }
    return YES;
}
/***********************************************************************
* readClass
* Read a class and metaclass as written by a compiler.
* Returns the new class pointer. This could be:
* - cls
* - nil (cls has a missing weak-linked superclass)
* - something else (space for this class was reserved by a future class)
*
* Note that all work performed by this function is preflighted by
* mustReadClasses(). Do not change this function without updating that one.
*
* Locking: runtimeLock acquired by map_images or objc_readClassPair
**********************************************************************/
Class readClass(Class cls, bool headerIsBundle, bool headerIsPreoptimized)
{
    const char *mangledName = cls->nonlazyMangledName();

    if (missingWeakSuperclass(cls)) {
        // No superclass (probably weak-linked).
        // Disavow any knowledge of this subclass.
        if (PrintConnecting) {
            _objc_inform("CLASS: IGNORING class '%s' with "
                         "missing weak-linked superclass",
                         cls->nameForLogging());
        }
        // Remap the class to nil so later references resolve to nothing.
        addRemappedClass(cls, nil);
        cls->setSuperclass(nil);
        return nil;
    }

    cls->fixupBackwardDeployingStableSwift();

    Class replacing = nil;
    if (mangledName != nullptr) {
        if (Class newCls = popFutureNamedClass(mangledName)) {
            // This name was previously allocated as a future class.
            // Copy objc_class to future class's struct.
            // Preserve future's rw data block.

            if (newCls->isAnySwift()) {
                _objc_fatal("Can't complete future class request for '%s' "
                            "because the real class is too big.",
                            cls->nameForLogging());
            }

            class_rw_t *rw = newCls->data();
            const class_ro_t *old_ro = rw->ro();
            memcpy(newCls, cls, sizeof(objc_class));

            // Manually set address-discriminated ptrauthed fields
            // so that newCls gets the correct signatures.
            newCls->setSuperclass(cls->getSuperclass());
            newCls->initIsa(cls->getIsa());

            // The memcpy clobbered newCls's data pointer; it now holds
            // cls's ro. Hook that ro into the preserved rw and restore rw.
            rw->set_ro((class_ro_t *)newCls->data());
            newCls->setData(rw);
            // The future class's placeholder ro (and its name) is now unused.
            freeIfMutable((char *)old_ro->getName());
            free((void *)old_ro);

            addRemappedClass(cls, newCls);

            replacing = cls;
            cls = newCls;
        }
    }

    if (headerIsPreoptimized && !replacing) {
        // class list built in shared cache
        // fixme strict assert doesn't work because of duplicates
        // ASSERT(cls == getClass(name));
        ASSERT(mangledName == nullptr || getClassExceptSomeSwift(mangledName));
    } else {
        if (mangledName) { //some Swift generic classes can lazily generate their names
            addNamedClass(cls, mangledName, replacing);
        } else {
            Class meta = cls->ISA();
            const class_ro_t *metaRO = meta->bits.safe_ro();
            ASSERT(metaRO->getNonMetaclass() && "Metaclass with lazy name must have a pointer to the corresponding nonmetaclass.");
            ASSERT(metaRO->getNonMetaclass() == cls && "Metaclass nonmetaclass pointer must equal the original class.");
        }
        addClassTableEntry(cls);
    }

    // for future reference: shared cache never contains MH_BUNDLEs
    if (headerIsBundle) {
        cls->data()->flags |= RO_FROM_BUNDLE;
        cls->ISA()->data()->flags |= RO_FROM_BUNDLE;
    }

    return cls;
}
/***********************************************************************
* readProtocol
* Read a protocol as written by a compiler.
**********************************************************************/
static void
readProtocol(protocol_t *newproto, Class protocol_class,
             NXMapTable *protocol_map,
             bool headerIsPreoptimized, bool headerIsBundle)
{
    // This is not enough to make protocols in unloaded bundles safe,
    // but it does prevent crashes when looking up unrelated protocols.
    // Bundle images get a key-copying insert so the map's keys don't
    // point into memory that goes away if the bundle is unloaded.
    auto insertFn = headerIsBundle ? NXMapKeyCopyingInsert : NXMapInsert;

    protocol_t *oldproto = (protocol_t *)getProtocol(newproto->mangledName);
    if (oldproto) {
        if (oldproto != newproto) {
            // Some other definition already won.
            if (PrintProtocols) {
                _objc_inform("PROTOCOLS: protocol at %p is %s "
                             "(duplicate of %p)",
                             newproto, oldproto->nameForLogging(), oldproto);
            }

            // If we are a shared cache binary then we have a definition of this
            // protocol, but if another one was chosen then we need to clear our
            // isCanonical bit so that no-one trusts it.
            // Note, if getProtocol returned a shared cache protocol then the
            // canonical definition is already in the shared cache and we don't
            // need to do anything.
            if (headerIsPreoptimized && !oldproto->isCanonical()) {
                // Note newproto is an entry in our __objc_protolist section which
                // for shared cache binaries points to the original protocol in
                // that binary, not the shared cache uniqued one.
                auto cacheproto = (protocol_t *)
                    getSharedCachePreoptimizedProtocol(newproto->mangledName);
                if (cacheproto && cacheproto->isCanonical())
                    cacheproto->clearIsCanonical();
            }
        }
    }
    else if (headerIsPreoptimized) {
        // Shared cache initialized the protocol object itself,
        // but in order to allow out-of-cache replacement we need
        // to add it to the protocol table now.
        protocol_t *cacheproto = (protocol_t *)
            getPreoptimizedProtocol(newproto->mangledName);
        protocol_t *installedproto;
        if (cacheproto && cacheproto != newproto) {
            // Another definition in the shared cache wins (because
            // everything in the cache was fixed up to point to it).
            installedproto = cacheproto;
        }
        else {
            // This definition wins.
            installedproto = newproto;
        }

        ASSERT(installedproto->getIsa() == protocol_class);
        ASSERT(installedproto->size >= sizeof(protocol_t));
        insertFn(protocol_map, installedproto->mangledName,
                 installedproto);

        if (PrintProtocols) {
            _objc_inform("PROTOCOLS: protocol at %p is %s",
                         installedproto, installedproto->nameForLogging());
            if (newproto != installedproto) {
                _objc_inform("PROTOCOLS: protocol at %p is %s "
                             "(duplicate of %p)",
                             newproto, installedproto->nameForLogging(),
                             installedproto);
            }
        }
    }
    else {
        // New protocol from an un-preoptimized image. Fix it up in place.
        // fixme duplicate protocols from unloadable bundle
        newproto->initIsa(protocol_class);  // fixme pinned
        insertFn(protocol_map, newproto->mangledName, newproto);
        if (PrintProtocols) {
            _objc_inform("PROTOCOLS: protocol at %p is %s",
                         newproto, newproto->nameForLogging());
        }
    }
}
/***********************************************************************
* _read_images
* Perform initial processing of the headers in the linked
* list beginning with headerList.
*
* Called by: map_images_nolock
*
* Locking: runtimeLock acquired by map_images
**********************************************************************/
void _read_images(header_info **hList, uint32_t hCount, int totalClasses, int unoptimizedTotalClasses)
{
    header_info *hi;
    uint32_t hIndex;
    size_t count;
    size_t i;
    Class *resolvedFutureClasses = nil;
    size_t resolvedFutureClassCount = 0;
    static bool doneOnce;
    bool launchTime = NO;
    TimeLogger ts(PrintImageTimes);

    runtimeLock.assertLocked();

// for-loop header that iterates hi over hList[0..hCount).
#define EACH_HEADER \
    hIndex = 0; \
    hIndex < hCount && (hi = hList[hIndex]); \
    hIndex++

    // One-time runtime setup, performed only on the first call
    // (i.e. at launch).
    if (!doneOnce) {
        doneOnce = YES;
        launchTime = YES;

#if SUPPORT_NONPOINTER_ISA
        // Disable non-pointer isa under some conditions.

# if SUPPORT_INDEXED_ISA
        // Disable nonpointer isa if any image contains old Swift code
        for (EACH_HEADER) {
            if (hi->info()->containsSwift() &&
                hi->info()->swiftUnstableVersion() < objc_image_info::SwiftVersion3)
            {
                DisableNonpointerIsa = true;
                if (PrintRawIsa) {
                    _objc_inform("RAW ISA: disabling non-pointer isa because "
                                 "the app or a framework contains Swift code "
                                 "older than Swift 3.0");
                }
                break;
            }
        }
# endif

# if TARGET_OS_OSX
        // Disable non-pointer isa if the app is too old
        // (linked before OS X 10.11)
        // if (!dyld_program_sdk_at_least(dyld_platform_version_macOS_10_11)) {
        //     DisableNonpointerIsa = true;
        //     if (PrintRawIsa) {
        //         _objc_inform("RAW ISA: disabling non-pointer isa because "
        //                      "the app is too old.");
        //     }
        // }

        // Disable non-pointer isa if the app has a __DATA,__objc_rawisa section
        // New apps that load old extensions may need this.
        for (EACH_HEADER) {
            if (hi->mhdr()->filetype != MH_EXECUTE) continue;
            unsigned long size;
            if (getsectiondata(hi->mhdr(), "__DATA", "__objc_rawisa", &size)) {
                DisableNonpointerIsa = true;
                if (PrintRawIsa) {
                    _objc_inform("RAW ISA: disabling non-pointer isa because "
                                 "the app has a __DATA,__objc_rawisa section");
                }
            }
            break;  // assume only one MH_EXECUTE image
        }
# endif
#endif

        if (DisableTaggedPointers) {
            disableTaggedPointers();
        }

        initializeTaggedPointerObfuscator();

        if (PrintConnecting) {
            _objc_inform("CLASS: found %d classes during launch", totalClasses);
        }

        // namedClasses
        // Preoptimized classes don't go in this table.
        // 4/3 is NXMapTable's load factor
        int namedClassesSize =
            (isPreoptimized() ? unoptimizedTotalClasses : totalClasses) * 4 / 3;
        gdb_objc_realized_classes =
            NXCreateMapTable(NXStrValueMapPrototype, namedClassesSize);

        ts.log("IMAGE TIMES: first time tasks");
    }

    // Fix up @selector references
    // (static so the count accumulates across calls for the
    //  PrintPreopt statistics at the bottom of this function).
    static size_t UnfixedSelectors;
    {
        mutex_locker_t lock(selLock);
        for (EACH_HEADER) {
            if (hi->hasPreoptimizedSelectors()) continue;

            bool isBundle = hi->isBundle();
            SEL *sels = _getObjc2SelectorRefs(hi, &count);
            UnfixedSelectors += count;
            for (i = 0; i < count; i++) {
                // Register (or look up) the canonical selector for this
                // name and rewrite the reference in place if it differs.
                const char *name = sel_cname(sels[i]);
                SEL sel = sel_registerNameNoLock(name, isBundle);
                if (sels[i] != sel) {
                    sels[i] = sel;
                }
            }
        }
    }

    ts.log("IMAGE TIMES: fix up selector references");

    // Discover classes. Fix up unresolved future classes. Mark bundle classes.
    bool hasDyldRoots = dyld_shared_cache_some_image_overridden();

    for (EACH_HEADER) {
        if (! mustReadClasses(hi, hasDyldRoots)) {
            // Image is sufficiently optimized that we need not call readClass()
            continue;
        }

        classref_t const *classlist = _getObjc2ClassList(hi, &count);

        bool headerIsBundle = hi->isBundle();
        bool headerIsPreoptimized = hi->hasPreoptimizedClasses();

        for (i = 0; i < count; i++) {
            Class cls = (Class)classlist[i];
            Class newCls = readClass(cls, headerIsBundle, headerIsPreoptimized);

            if (newCls != cls && newCls) {
                // Class was moved but not deleted. Currently this occurs
                // only when the new class resolved a future class.
                // Non-lazily realize the class below.
                resolvedFutureClasses = (Class *)
                    realloc(resolvedFutureClasses,
                            (resolvedFutureClassCount+1) * sizeof(Class));
                resolvedFutureClasses[resolvedFutureClassCount++] = newCls;
            }
        }
    }

    ts.log("IMAGE TIMES: discover classes");

    // Fix up remapped classes
    // Class list and nonlazy class list remain unremapped.
    // Class refs and super refs are remapped for message dispatching.

    if (!noClassesRemapped()) {
        for (EACH_HEADER) {
            Class *classrefs = _getObjc2ClassRefs(hi, &count);
            for (i = 0; i < count; i++) {
                remapClassRef(&classrefs[i]);
            }
            // fixme why doesn't test future1 catch the absence of this?
            classrefs = _getObjc2SuperRefs(hi, &count);
            for (i = 0; i < count; i++) {
                remapClassRef(&classrefs[i]);
            }
        }
    }

    ts.log("IMAGE TIMES: remap classes");

#if SUPPORT_FIXUP
    // Fix up old objc_msgSend_fixup call sites
    for (EACH_HEADER) {
        message_ref_t *refs = _getObjc2MessageRefs(hi, &count);
        if (count == 0) continue;

        if (PrintVtables) {
            _objc_inform("VTABLES: repairing %zu unsupported vtable dispatch "
                         "call sites in %s", count, hi->fname());
        }
        for (i = 0; i < count; i++) {
            fixupMessageRef(refs+i);
        }
    }

    ts.log("IMAGE TIMES: fix up objc_msgSend_fixup");
#endif

    // Discover protocols. Fix up protocol refs.
    for (EACH_HEADER) {
        extern objc_class OBJC_CLASS_$_Protocol;
        Class cls = (Class)&OBJC_CLASS_$_Protocol;
        ASSERT(cls);
        NXMapTable *protocol_map = protocols();
        bool isPreoptimized = hi->hasPreoptimizedProtocols();

        // Skip reading protocols if this is an image from the shared cache
        // and we support roots
        // Note, after launch we do need to walk the protocol as the protocol
        // in the shared cache is marked with isCanonical() and that may not
        // be true if some non-shared cache binary was chosen as the canonical
        // definition
        if (launchTime && isPreoptimized) {
            if (PrintProtocols) {
                _objc_inform("PROTOCOLS: Skipping reading protocols in image: %s",
                             hi->fname());
            }
            continue;
        }

        bool isBundle = hi->isBundle();

        protocol_t * const *protolist = _getObjc2ProtocolList(hi, &count);
        for (i = 0; i < count; i++) {
            readProtocol(protolist[i], cls, protocol_map,
                         isPreoptimized, isBundle);
        }
    }

    ts.log("IMAGE TIMES: discover protocols");

    // Fix up @protocol references
    // Preoptimized images may have the right
    // answer already but we don't know for sure.
    for (EACH_HEADER) {
        // At launch time, we know preoptimized image refs are pointing at the
        // shared cache definition of a protocol. We can skip the check on
        // launch, but have to visit @protocol refs for shared cache images
        // loaded later.
        if (launchTime && hi->isPreoptimized())
            continue;
        protocol_t **protolist = _getObjc2ProtocolRefs(hi, &count);
        for (i = 0; i < count; i++) {
            remapProtocolRef(&protolist[i]);
        }
    }

    ts.log("IMAGE TIMES: fix up @protocol references");

    // Discover categories. Only do this after the initial category
    // attachment has been done. For categories present at startup,
    // discovery is deferred until the first load_images call after
    // the call to _dyld_objc_notify_register completes. rdar://problem/53119145
    if (didInitialAttachCategories) {
        for (EACH_HEADER) {
            load_categories_nolock(hi);
        }
    }

    ts.log("IMAGE TIMES: discover categories");

    // Category discovery MUST BE Late to avoid potential races
    // when other threads call the new category code before
    // this thread finishes its fixups.

    // +load handled by prepare_load_methods()

    // Realize non-lazy classes (for +load methods and static instances)
    for (EACH_HEADER) {
        classref_t const *classlist = hi->nlclslist(&count);
        for (i = 0; i < count; i++) {
            Class cls = remapClass(classlist[i]);
            if (!cls) continue;

            addClassTableEntry(cls);

            if (cls->isSwiftStable()) {
                if (cls->swiftMetadataInitializer()) {
                    _objc_fatal("Swift class %s with a metadata initializer "
                                "is not allowed to be non-lazy",
                                cls->nameForLogging());
                }
                // fixme also disallow relocatable classes
                // We can't disallow all Swift classes because of
                // classes like Swift.__EmptyArrayStorage
            }
            realizeClassWithoutSwift(cls, nil);
        }
    }

    ts.log("IMAGE TIMES: realize non-lazy classes");

    // Realize newly-resolved future classes, in case CF manipulates them
    if (resolvedFutureClasses) {
        for (i = 0; i < resolvedFutureClassCount; i++) {
            Class cls = resolvedFutureClasses[i];
            if (cls->isSwiftStable()) {
                _objc_fatal("Swift class is not allowed to be future");
            }
            realizeClassWithoutSwift(cls, nil);
            cls->setInstancesRequireRawIsaRecursively(false/*inherited*/);
        }
        free(resolvedFutureClasses);
    }

    ts.log("IMAGE TIMES: realize future classes");

    if (DebugNonFragileIvars) {
        realizeAllClasses();
    }

    // Print preoptimization statistics
    if (PrintPreopt) {
        static unsigned int PreoptTotalMethodLists;
        static unsigned int PreoptOptimizedMethodLists;
        static unsigned int PreoptTotalClasses;
        static unsigned int PreoptOptimizedClasses;

        for (EACH_HEADER) {
            if (hi->hasPreoptimizedSelectors()) {
                _objc_inform("PREOPTIMIZATION: honoring preoptimized selectors "
                             "in %s", hi->fname());
            }
            else if (hi->info()->optimizedByDyld()) {
                _objc_inform("PREOPTIMIZATION: IGNORING preoptimized selectors "
                             "in %s", hi->fname());
            }

            classref_t const *classlist = _getObjc2ClassList(hi, &count);
            for (i = 0; i < count; i++) {
                Class cls = remapClass(classlist[i]);
                if (!cls) continue;

                PreoptTotalClasses++;
                if (hi->hasPreoptimizedClasses()) {
                    PreoptOptimizedClasses++;
                }

                const method_list_t *mlist;
                if ((mlist = cls->bits.safe_ro()->baseMethods())) {
                    PreoptTotalMethodLists++;
                    if (mlist->isFixedUp()) {
                        PreoptOptimizedMethodLists++;
                    }
                }
                if ((mlist = cls->ISA()->bits.safe_ro()->baseMethods())) {
                    PreoptTotalMethodLists++;
                    if (mlist->isFixedUp()) {
                        PreoptOptimizedMethodLists++;
                    }
                }
            }
        }

        _objc_inform("PREOPTIMIZATION: %zu selector references not "
                     "pre-optimized", UnfixedSelectors);
        _objc_inform("PREOPTIMIZATION: %u/%u (%.3g%%) method lists pre-sorted",
                     PreoptOptimizedMethodLists, PreoptTotalMethodLists,
                     PreoptTotalMethodLists
                     ? 100.0*PreoptOptimizedMethodLists/PreoptTotalMethodLists
                     : 0.0);
        _objc_inform("PREOPTIMIZATION: %u/%u (%.3g%%) classes pre-registered",
                     PreoptOptimizedClasses, PreoptTotalClasses,
                     PreoptTotalClasses
                     ? 100.0*PreoptOptimizedClasses/PreoptTotalClasses
                     : 0.0);
        _objc_inform("PREOPTIMIZATION: %zu protocol references not "
                     "pre-optimized", UnfixedProtocolReferences);
    }

#undef EACH_HEADER
}
/***********************************************************************
* prepare_load_methods
* Schedule +load for classes in this image, any un-+load-ed
* superclasses in other images, and any categories in this image.
**********************************************************************/
// Recursively schedule +load for cls and any un-+load-ed superclasses.
// cls must already be connected.
// Recursively schedule +load for c and any un-+load-ed superclasses.
// c must already be realized.
static void schedule_class_load(Class c)
{
    if (c == nil) return;
    ASSERT(c->isRealized());  // _read_images should realize

    // Already scheduled (or run)? Nothing to do.
    if (c->data()->flags & RW_LOADED) return;

    // Recurse into the superclass first so superclass +load
    // is scheduled ahead of the subclass's.
    schedule_class_load(c->getSuperclass());

    add_class_to_loadable_list(c);
    c->setInfo(RW_LOADED);
}
// Quick scan for +load methods that doesn't take a lock.
// Quick scan for +load methods that doesn't take a lock.
// An image can only have +load work if it has non-lazy classes
// or non-lazy categories.
bool hasLoadMethods(const headerType *mhdr)
{
    size_t count;
    if (_getObjc2NonlazyClassList(mhdr, &count) && count > 0) return true;
    return _getObjc2NonlazyCategoryList(mhdr, &count) && count > 0;
}
void prepare_load_methods(const headerType *mhdr)
{
    size_t count, i;

    runtimeLock.assertLocked();

    // Schedule +load for every non-lazy class in this image
    // (and, via schedule_class_load's recursion, any un-+load-ed
    // superclasses in other images).
    classref_t const *classlist =
        _getObjc2NonlazyClassList(mhdr, &count);
    for (i = 0; i < count; i++) {
        schedule_class_load(remapClass(classlist[i]));
    }

    // Schedule +load for every non-lazy category in this image.
    category_t * const *categorylist = _getObjc2NonlazyCategoryList(mhdr, &count);
    for (i = 0; i < count; i++) {
        category_t *cat = categorylist[i];
        Class cls = remapClass(cat->cls);
        if (!cls) continue;  // category for ignored weak-linked class
        if (cls->isSwiftStable()) {
            _objc_fatal("Swift class extensions and categories on Swift "
                        "classes are not allowed to have +load methods");
        }
        // Realize the class now so its category +load can run.
        realizeClassWithoutSwift(cls, nil);
        ASSERT(cls->ISA()->isRealized());
        add_category_to_loadable_list(cat);
    }
}
/***********************************************************************
* _unload_image
* Only handles MH_BUNDLE for now.
* Locking: write-lock and loadMethodLock acquired by unmap_image
**********************************************************************/
void _unload_image(header_info *hi)
{
    size_t count, i;

    loadMethodLock.assertLocked();
    runtimeLock.assertLocked();

    // Unload unattached categories and categories waiting for +load.

    // Ignore __objc_catlist2. We don't support unloading Swift
    // and we never will.
    category_t * const *catlist = hi->catlist(&count);
    for (i = 0; i < count; i++) {
        category_t *cat = catlist[i];
        Class cls = remapClass(cat->cls);
        if (!cls) continue;  // category for ignored weak-linked class
        // fixme for MH_DYLIB cat's class may have been unloaded already

        // unattached list
        objc::unattachedCategories.eraseCategoryForClass(cat, cls);

        // +load queue
        remove_category_from_loadable_list(cat);
    }

    // Unload classes.

    // Gather classes from both __DATA,__objc_clslist
    // and __DATA,__objc_nlclslist. arclite's hack puts a class in the latter
    // only, and we need to unload that class if we unload an arclite image.
    // A DenseSet de-duplicates classes appearing in both lists.
    objc::DenseSet<Class> classes{};
    classref_t const *classlist;

    classlist = _getObjc2ClassList(hi, &count);
    for (i = 0; i < count; i++) {
        Class cls = remapClass(classlist[i]);
        if (cls) classes.insert(cls);
    }

    classlist = hi->nlclslist(&count);
    for (i = 0; i < count; i++) {
        Class cls = remapClass(classlist[i]);
        if (cls) classes.insert(cls);
    }

    // First detach classes from each other. Then free each class.
    // This avoids bugs where this loop unloads a subclass before its superclass
    for (Class cls: classes) {
        remove_class_from_loadable_list(cls);
        detach_class(cls->ISA(), YES);
        detach_class(cls, NO);
    }
    for (Class cls: classes) {
        free_class(cls->ISA());
        free_class(cls);
    }

    // XXX FIXME -- Clean up protocols:
    // <rdar://problem/9033191> Support unloading protocols at dylib/image unload time

    // fixme DebugUnload
}
/***********************************************************************
* method_getDescription
* Returns a pointer to this method's objc_method_description.
* Locking: none
**********************************************************************/
// Returns a pointer to this method's objc_method_description,
// or nil for a nil method.
struct objc_method_description *
method_getDescription(Method m)
{
    return m ? m->getDescription() : nil;
}
// Returns this method's IMP; nil method yields nil.
IMP
method_getImplementation(Method m)
{
    if (m == nil) return nil;
    return m->imp(true);
}
// Returns this method's IMP and selector together.
// m must not be nil (no nil check is performed here).
IMPAndSEL _method_getImplementationAndName(Method m)
{
    IMPAndSEL result = { m->imp(true), m->name() };
    return result;
}
/***********************************************************************
* method_getName
* Returns this method's selector.
* The method must not be nil.
* The method must already have been fixed-up.
* Locking: none
**********************************************************************/
// Returns this method's selector, or nil for a nil method.
// The method must already have been fixed-up.
SEL
method_getName(Method m)
{
    if (m == nil) return nil;

    SEL sel = m->name();
    // Sanity check: the stored selector should already be registered
    // (i.e. it is its own canonical form).
    ASSERT(sel == sel_registerName(sel_getName(sel)));
    return sel;
}
/***********************************************************************
* method_getTypeEncoding
* Returns this method's old-style type encoding string.
* The method must not be nil.
* Locking: none
**********************************************************************/
// Returns this method's old-style type encoding string,
// or nil for a nil method.
const char *
method_getTypeEncoding(Method m)
{
    return m ? m->types() : nil;
}
/***********************************************************************
* method_setImplementation
* Sets this method's implementation to imp.
* The previous implementation is returned.
**********************************************************************/
// Sets m's implementation to imp and returns the previous IMP.
// cls may be Nil (unknown), which forces slower cache flushing.
// Locking: runtimeLock must be held by the caller.
static IMP
_method_setImplementation(Class cls, method_t *m, IMP imp)
{
    runtimeLock.assertLocked();

    if (!m) return nil;
    if (!imp) return nil;

    // Capture the old IMP and the selector BEFORE overwriting, so the
    // flush predicate below can recognize stale cache entries.
    IMP old = m->imp(false);
    SEL sel = m->name();

    m->setImp(imp);

    // Cache updates are slow if cls is nil (i.e. unknown)
    // RR/AWZ updates are slow if cls is nil (i.e. unknown)
    // fixme build list of classes whose Methods are known externally?
    // Flush only caches that may contain (sel -> old imp).
    flushCaches(cls, __func__, [sel, old](Class c){
        return c->cache.shouldFlush(sel, old);
    });

    adjustCustomFlagsForMethodChange(cls, m);

    return old;
}
// Public entry point: sets m's implementation to imp and returns the
// previous one. Takes runtimeLock and delegates with an unknown (Nil)
// class, so cache/RR/AWZ updates take the slow path.
IMP
method_setImplementation(Method m, IMP imp)
{
    // Don't know the class - will be slow if RR/AWZ are affected
    // fixme build list of classes whose Methods are known externally?
    mutex_locker_t lock(runtimeLock);
    return _method_setImplementation(Nil, m, imp);
}
// Sets m's IMP directly, WITHOUT flushing method caches or adjusting
// custom-RR/AWZ flags (compare _method_setImplementation). The caller
// is responsible for knowing this is safe. m must not be nil.
extern void _method_setImplementationRawUnsafe(Method m, IMP imp)
{
    mutex_locker_t lock(runtimeLock);
    m->setImp(imp);
}
// Atomically (under runtimeLock) swaps the implementations of m1 and m2.
// No-op if either method is nil.
void method_exchangeImplementations(Method m1, Method m2)
{
    if (!m1 || !m2) return;

    mutex_locker_t lock(runtimeLock);

    // Read both IMPs and selectors BEFORE writing either, so the swap
    // and the flush predicate below see the pre-exchange values.
    IMP imp1 = m1->imp(false);
    IMP imp2 = m2->imp(false);
    SEL sel1 = m1->name();
    SEL sel2 = m2->name();

    m1->setImp(imp2);
    m2->setImp(imp1);

    // RR/AWZ updates are slow because class is unknown
    // Cache updates are slow because class is unknown
    // fixme build list of classes whose Methods are known externally?
    // Flush any cache that could hold either stale (sel -> imp) pair.
    flushCaches(nil, __func__, [sel1, sel2, imp1, imp2](Class c){
        return c->cache.shouldFlush(sel1, imp1) || c->cache.shouldFlush(sel2, imp2);
    });

    adjustCustomFlagsForMethodChange(nil, m1);
    adjustCustomFlagsForMethodChange(nil, m2);
}
/***********************************************************************
* ivar_getOffset
* fixme
* Locking: none
**********************************************************************/
// Returns the ivar's byte offset within its instance; 0 for a nil ivar.
ptrdiff_t
ivar_getOffset(Ivar ivar)
{
    return ivar ? *ivar->offset : 0;
}
/***********************************************************************
* ivar_getName
* fixme
* Locking: none
**********************************************************************/
// Returns the ivar's name, or nil for a nil ivar.
const char *
ivar_getName(Ivar ivar)
{
    return ivar ? ivar->name : nil;
}
/***********************************************************************
* ivar_getTypeEncoding
* fixme
* Locking: none
**********************************************************************/
// Returns the ivar's type encoding string, or nil for a nil ivar.
const char *
ivar_getTypeEncoding(Ivar ivar)
{
    return ivar ? ivar->type : nil;
}
// Returns the property's name.
// Robustness: tolerate nil like the sibling introspection accessors
// (ivar_getName, method_getTypeEncoding) instead of dereferencing nil.
const char *property_getName(objc_property_t prop)
{
    if (!prop) return nil;
    return prop->name;
}
// Returns the property's attribute string (e.g. "T@\"NSString\",C,N,V_name").
// Robustness: tolerate nil like the sibling introspection accessors
// (ivar_getTypeEncoding, property_copyAttributeList's own nil check).
const char *property_getAttributes(objc_property_t prop)
{
    if (!prop) return nil;
    return prop->attributes;
}
// Returns a malloc'd array of the property's parsed attributes
// (caller frees). Stores the element count in *outCount if non-NULL.
// nil property yields nil and a zero count.
objc_property_attribute_t *property_copyAttributeList(objc_property_t prop,
                                                      unsigned int *outCount)
{
    if (prop == nil) {
        if (outCount) *outCount = 0;
        return nil;
    }

    mutex_locker_t lock(runtimeLock);
    return copyPropertyAttributeList(prop->attributes, outCount);
}
// Returns a malloc'd copy of the named attribute's value (caller frees),
// or nil if the property, the name, or the attribute is absent.
char * property_copyAttributeValue(objc_property_t prop, const char *name)
{
    // Reject nil property, nil name, and empty name up front.
    if (prop == nil || name == nil || name[0] == '\0') return nil;

    mutex_locker_t lock(runtimeLock);
    return copyPropertyAttributeValue(prop->attributes, name);
}
/***********************************************************************
* getExtendedTypesIndexesForMethod
* Returns:
* a is the count of methods in all method lists before m's method list
* b is the index of m in m's method list
* a+b is the index of m's extended types in the extended types array
**********************************************************************/
// Walks the protocol's four method lists in their fixed storage order
// (required-instance, required-class, optional-instance, optional-class),
// accumulating method counts into 'a' until m's own list is reached,
// then sets 'b' to m's index within that list.
// NOTE: 'b' is left unmodified if no list matches the required/instance
// flags (callers are expected to pass flags describing a list m is in).
static void getExtendedTypesIndexesForMethod(protocol_t *proto, const method_t *m, bool isRequiredMethod, bool isInstanceMethod, uint32_t& a, uint32_t &b)
{
    a = 0;

    if (proto->instanceMethods) {
        if (isRequiredMethod && isInstanceMethod) {
            b = proto->instanceMethods->indexOfMethod(m);
            return;
        }
        a += proto->instanceMethods->count;
    }

    if (proto->classMethods) {
        if (isRequiredMethod && !isInstanceMethod) {
            b = proto->classMethods->indexOfMethod(m);
            return;
        }
        a += proto->classMethods->count;
    }

    if (proto->optionalInstanceMethods) {
        if (!isRequiredMethod && isInstanceMethod) {
            b = proto->optionalInstanceMethods->indexOfMethod(m);
            return;
        }
        a += proto->optionalInstanceMethods->count;
    }

    if (proto->optionalClassMethods) {
        if (!isRequiredMethod && !isInstanceMethod) {
            b = proto->optionalClassMethods->indexOfMethod(m);
            return;
        }
        a += proto->optionalClassMethods->count;
    }
}
/***********************************************************************
* getExtendedTypesIndexForMethod
* Returns the index of m's extended types in proto's extended types array.
**********************************************************************/
// Returns the index of m's extended types in proto's extended types array.
// Extended types are stored flat, so the slot is the count of methods in
// all preceding lists plus m's index within its own list.
static uint32_t getExtendedTypesIndexForMethod(protocol_t *proto, const method_t *m, bool isRequiredMethod, bool isInstanceMethod)
{
    uint32_t methodsBefore;
    uint32_t indexInList;
    getExtendedTypesIndexesForMethod(proto, m, isRequiredMethod,
                                     isInstanceMethod,
                                     methodsBefore, indexInList);
    return methodsBefore + indexInList;
}
/***********************************************************************
* fixupProtocolMethodList
* Fixes up a single method list in a protocol.
**********************************************************************/
// Fixes up a single method list in a protocol: runs the generic
// fixupMethodList(), then (if the protocol has extended method types)
// sorts the list and the parallel extended-types array in lockstep.
// Locking: runtimeLock must be held by the caller.
static void
fixupProtocolMethodList(protocol_t *proto, method_list_t *mlist,
                        bool required, bool instance)
{
    runtimeLock.assertLocked();

    if (!mlist) return;
    if (mlist->isFixedUp()) return;

    const char **extTypes = proto->extendedMethodTypes();
    fixupMethodList(mlist, true/*always copy for simplicity*/,
                    !extTypes/*sort if no extended method types*/);

    if (extTypes && !mlist->isSmallList()) {
        // Sort method list and extended method types together.
        // fixupMethodList() can't do this.
        // fixme COW stomp
        uint32_t count = mlist->count;
        uint32_t prefix;
        uint32_t junk;
        // 'prefix' = number of extended-types entries belonging to the
        // method lists stored before this one; 'junk' (the index of
        // method 0 in its own list) is discarded.
        getExtendedTypesIndexesForMethod(proto, &mlist->get(0),
                                         required, instance, prefix, junk);
        // O(n^2) in-place sort by selector address, swapping the
        // corresponding extTypes entries alongside the methods.
        for (uint32_t i = 0; i < count; i++) {
            for (uint32_t j = i+1; j < count; j++) {
                auto& mi = mlist->get(i).big();
                auto& mj = mlist->get(j).big();
                if (mi.name > mj.name) {
                    std::swap(mi, mj);
                    std::swap(extTypes[prefix+i], extTypes[prefix+j]);
                }
            }
        }
    }
}
/***********************************************************************
* fixupProtocol
* Fixes up all of a protocol's method lists.
**********************************************************************/
// Fixes up all of a protocol's method lists, recursing into inherited
// protocols first, then marks the protocol fixed up.
// Locking: runtimeLock must be held by the caller.
static void
fixupProtocol(protocol_t *proto)
{
    runtimeLock.assertLocked();

    // Inherited protocols first.
    if (proto->protocols) {
        uintptr_t n = proto->protocols->count;
        for (uintptr_t idx = 0; idx < n; idx++) {
            protocol_t *inherited = remapProtocol(proto->protocols->list[idx]);
            if (!inherited->isFixedUp()) fixupProtocol(inherited);
        }
    }

    // Then each of this protocol's four method lists.
    fixupProtocolMethodList(proto, proto->instanceMethods, YES, YES);
    fixupProtocolMethodList(proto, proto->classMethods, YES, NO);
    fixupProtocolMethodList(proto, proto->optionalInstanceMethods, NO, YES);
    fixupProtocolMethodList(proto, proto->optionalClassMethods, NO, NO);

    // fixme memory barrier so we can check this with no lock
    proto->setFixedUp();
}
/***********************************************************************
* fixupProtocolIfNeeded
* Fixes up all of a protocol's method lists if they aren't fixed up already.
* Locking: write-locks runtimeLock.
**********************************************************************/
// Fixes up all of a protocol's method lists unless already fixed up.
// Locking: write-locks runtimeLock; caller must not hold it.
static void
fixupProtocolIfNeeded(protocol_t *proto)
{
    runtimeLock.assertUnlocked();
    ASSERT(proto);

    if (proto->isFixedUp()) return;

    mutex_locker_t lock(runtimeLock);
    fixupProtocol(proto);
}
// Returns the protocol's method list selected by the (required, instance)
// flag pair. May be nil if the protocol has no such list.
static method_list_t *
getProtocolMethodList(protocol_t *proto, bool required, bool instance)
{
    if (required) {
        return instance ? proto->instanceMethods
                        : proto->classMethods;
    }
    return instance ? proto->optionalInstanceMethods
                    : proto->optionalClassMethods;
}