Mirror of https://github.com/google/flatbuffers.git (synced 2025-04-09 00:12:15 +08:00)
FlatBuffers 64 for C++ (#7935)
* First working hack of adding 64-bit. Don't judge :)
* Made vector_downward work on 64 bit types
* vector_downward uses size_t, added offset64 to reflection
* cleaned up adding offset64 in parser
* Add C++ testing skeleton for 64-bit
* working test for CreateVector64
* working >2 GiB buffers
* support for large strings
* simplified CreateString<> to just provide the offset type
* generalize CreateVector template
* update test_64.afb due to upstream format change
* Added Vector64 type, which is just an alias for vector ATM
* Switch to Offset64 for Vector64
* Update for reflection bfbs output change
* Starting to add support for vector64 type in C++
* made a generic CreateVector that can handle different offsets and vector types
* Support for 32-vector with 64-addressing
* Vector64 basic builder + tests working
* basic JSON support for vector64
* renamed fields in test_64bit.fbs to better reflect their use
* working C++ vector64 builder
* Apply --annotate-sparse-vector to 64-bit tests
* Enable Vector64 for --annotate-sparse-vectors
* Merged from upstream
* Add `near_string` field for testing 32-bit offsets alongside
* keep track of where the 32-bit and 64-bit regions are for FlatBufferBuilder
* move template<> outside class body for GCC
* update run.sh to build and run tests
* basic assertion for adding 64-bit offset at the wrong time
* started to separate `FlatBufferBuilder` into two classes, one 64-bit aware, the other not
* add test for nested flatbuffer vector64, fix bug in alignment of big vectors
* fixed CreateDirect method by iterating by Offset64 first
* internal refactoring of FlatBufferBuilder
* block unsupported languages in the parser from using 64-bit
* evolution tests for adding a vector64 field
* conformity tests for adding/removing offset64 attributes
* ensure test is for a big buffer
* add parser error tests for `offset64` and `vector64` attributes
* add missing static that GCC only complains about
* remove stdint-uintn.h header that gets automatically added
* move 64-bit CalculateOffset internal
* fixed return size of EndVector
* various fixes on Windows
* add SizeT to vector_downward
* minimize range of size changes in vector and builder
* reworked how the builder tracks whether 64-bit offsets are added
* Add ReturnT to EndVector
* small cleanups
* remove need for second Array definition
* combine IndirectHelpers into one definition
* started support for vectors of structs
* Support for 32/64-vectors of structs + Offset64
* small cleanups
* add verification for vector64
* add sized prefix for 64-bit buffers
* add fuzzer for 64-bit
* add example of adding many vectors using a wrapper table
* run the new -bfbs-gen-embed logic on the 64-bit tests
* remove run.sh and fix CMakeLists issue
* fixed Bazel rules
* fixed some PR comments
* add 64-bit tests to CMakeLists
This commit is contained in:
parent 13fc75cb6b
commit 63b7b25289
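In rough terms, the API this change introduces can be exercised as in the sketch below. It is illustrative only and not part of the commit: it uses only builder-level calls that appear in this diff (FlatBufferBuilder64, CreateVector64, CreateString<Offset64>); real code would pass the resulting offsets to table builders generated from a schema that uses the new `offset64` / `vector64` attributes.

#include <cstdint>
#include <vector>

#include "flatbuffers/flatbuffers.h"

int main() {
  // 64-bit aware builder (FlatBufferBuilderImpl<true>, typedef'd in this diff).
  flatbuffers::FlatBufferBuilder64 builder;

  // Payload that is allowed to push the buffer past the 2 GiB limit of 32-bit
  // offsets (kept tiny here; the size is purely illustrative).
  std::vector<uint8_t> big_data(1024, 0);

  // 64-bit addressed items must be serialized first, into the tail of the
  // buffer (the "64-bit region").
  auto big_vector = builder.CreateVector64(big_data);
  auto far_string = builder.CreateString<flatbuffers::Offset64>("far");

  // Ordinary 32-bit offsets may only be created afterwards.
  auto near_string = builder.CreateString("near");

  // A real program would now store these offsets in a table generated from a
  // schema using the new `offset64` / `vector64` attributes, then Finish().
  (void)big_vector; (void)far_string; (void)near_string;
  return 0;
}

The builder asserts if that ordering is violated; see CanAddOffset64 further down in the diff.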
@@ -234,6 +234,8 @@ set(FlatBuffers_Tests_SRCS
  tests/native_type_test_impl.cpp
  tests/alignment_test.h
  tests/alignment_test.cpp
  tests/64bit/offset64_test.h
  tests/64bit/offset64_test.cpp
  include/flatbuffers/code_generators.h
  src/code_generators.cpp
)
@@ -527,6 +529,9 @@ if(FLATBUFFERS_BUILD_TESTS)
  compile_schema_for_test(tests/native_inline_table_test.fbs "${FLATC_OPT_COMP}")
  compile_schema_for_test(tests/native_type_test.fbs "${FLATC_OPT}")
  compile_schema_for_test(tests/key_field/key_field_sample.fbs "${FLATC_OPT_COMP}")
  compile_schema_for_test(tests/64bit/test_64bit.fbs "${FLATC_OPT_COMP};--bfbs-gen-embed")
  compile_schema_for_test(tests/64bit/evolution/v1.fbs "${FLATC_OPT_COMP}")
  compile_schema_for_test(tests/64bit/evolution/v2.fbs "${FLATC_OPT_COMP}")

  if(FLATBUFFERS_CODE_SANITIZE)
    add_fsanitize_to_target(flattests ${FLATBUFFERS_CODE_SANITIZE})
@@ -17,6 +17,7 @@
#ifndef FLATBUFFERS_ARRAY_H_
#define FLATBUFFERS_ARRAY_H_

#include <cstdint>
#include <memory>

#include "flatbuffers/base.h"
@@ -37,7 +38,7 @@ template<typename T, uint16_t length> class Array {
 public:
  typedef uint16_t size_type;
  typedef typename IndirectHelper<IndirectHelperType>::return_type return_type;
  typedef VectorConstIterator<T, return_type> const_iterator;
  typedef VectorConstIterator<T, return_type, uoffset_t> const_iterator;
  typedef VectorReverseIterator<const_iterator> const_reverse_iterator;

  // If T is a LE-scalar or a struct (!scalar_tag::value).
@@ -158,11 +159,13 @@ template<typename T, uint16_t length> class Array {

// Specialization for Array[struct] with access using Offset<void> pointer.
// This specialization used by idl_gen_text.cpp.
template<typename T, uint16_t length> class Array<Offset<T>, length> {
template<typename T, uint16_t length, template<typename> class OffsetT>
class Array<OffsetT<T>, length> {
  static_assert(flatbuffers::is_same<T, void>::value, "unexpected type T");

 public:
  typedef const void *return_type;
  typedef uint16_t size_type;

  const uint8_t *Data() const { return data_; }
@@ -43,6 +43,7 @@
#include <vector>
#include <set>
#include <algorithm>
#include <limits>
#include <iterator>
#include <memory>

@@ -323,9 +324,11 @@ namespace flatbuffers {
// Also, using a consistent offset type maintains compatibility of serialized
// offset values between 32bit and 64bit systems.
typedef uint32_t uoffset_t;
typedef uint64_t uoffset64_t;

// Signed offsets for references that can go in both directions.
typedef int32_t soffset_t;
typedef int64_t soffset64_t;

// Offset/index used in v-tables, can be changed to uint8_t in
// format forks to save a bit of space if desired.
@@ -334,7 +337,8 @@ typedef uint16_t voffset_t;
typedef uintmax_t largest_scalar_t;

// In 32bits, this evaluates to 2GB - 1
#define FLATBUFFERS_MAX_BUFFER_SIZE ((1ULL << (sizeof(::flatbuffers::soffset_t) * 8 - 1)) - 1)
#define FLATBUFFERS_MAX_BUFFER_SIZE std::numeric_limits<::flatbuffers::soffset_t>::max()
#define FLATBUFFERS_MAX_64_BUFFER_SIZE std::numeric_limits<::flatbuffers::soffset64_t>::max()

// The minimum size buffer that can be a valid flatbuffer.
// Includes the offset to the root table (uoffset_t), the offset to the vtable
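The two limits above follow directly from the signed offset types. As a quick sanity check (a sketch, not part of the change), the resulting ceilings are:

#include <cstdint>
#include <limits>

// With soffset_t = int32_t and soffset64_t = int64_t, as typedef'd above:
static_assert(std::numeric_limits<int32_t>::max() == 0x7FFFFFFF,
              "32-bit buffers top out just below 2 GiB");
static_assert(std::numeric_limits<int64_t>::max() == 0x7FFFFFFFFFFFFFFFLL,
              "64-bit buffers top out just below 2^63 bytes");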
@@ -25,14 +25,33 @@ namespace flatbuffers {

// Wrapper for uoffset_t to allow safe template specialization.
// Value is allowed to be 0 to indicate a null object (see e.g. AddOffset).
template<typename T> struct Offset {
  uoffset_t o;
template<typename T = void> struct Offset {
  // The type of offset to use.
  typedef uoffset_t offset_type;

  offset_type o;
  Offset() : o(0) {}
  Offset(uoffset_t _o) : o(_o) {}
  Offset<void> Union() const { return Offset<void>(o); }
  Offset(const offset_type _o) : o(_o) {}
  Offset<> Union() const { return o; }
  bool IsNull() const { return !o; }
};

// Wrapper for uoffset64_t Offsets.
template<typename T = void> struct Offset64 {
  // The type of offset to use.
  typedef uoffset64_t offset_type;

  offset_type o;
  Offset64() : o(0) {}
  Offset64(const offset_type offset) : o(offset) {}
  Offset64<> Union() const { return o; }
  bool IsNull() const { return !o; }
};

// Litmus check for ensuring the Offsets are the expected size.
static_assert(sizeof(Offset<>) == 4, "Offset has wrong size");
static_assert(sizeof(Offset64<>) == 8, "Offset64 has wrong size");

inline void EndianCheck() {
  int endiantest = 1;
  // If this fails, see FLATBUFFERS_LITTLEENDIAN above.
@@ -75,35 +94,59 @@ template<typename T> struct IndirectHelper {
  typedef T return_type;
  typedef T mutable_return_type;
  static const size_t element_stride = sizeof(T);
  static return_type Read(const uint8_t *p, uoffset_t i) {

  static return_type Read(const uint8_t *p, const size_t i) {
    return EndianScalar((reinterpret_cast<const T *>(p))[i]);
  }
  static return_type Read(uint8_t *p, uoffset_t i) {
    return Read(const_cast<const uint8_t *>(p), i);
  static mutable_return_type Read(uint8_t *p, const size_t i) {
    return reinterpret_cast<mutable_return_type>(
        Read(const_cast<const uint8_t *>(p), i));
  }
};
template<typename T> struct IndirectHelper<Offset<T>> {

// For vector of Offsets.
template<typename T, template<typename> class OffsetT>
struct IndirectHelper<OffsetT<T>> {
  typedef const T *return_type;
  typedef T *mutable_return_type;
  static const size_t element_stride = sizeof(uoffset_t);
  static return_type Read(const uint8_t *p, uoffset_t i) {
    p += i * sizeof(uoffset_t);
    return reinterpret_cast<return_type>(p + ReadScalar<uoffset_t>(p));
  typedef typename OffsetT<T>::offset_type offset_type;
  static const offset_type element_stride = sizeof(offset_type);

  static return_type Read(const uint8_t *const p, const offset_type i) {
    // Offsets are relative to themselves, so first update the pointer to
    // point to the offset location.
    const uint8_t *const offset_location = p + i * element_stride;

    // Then read the scalar value of the offset (which may be 32 or 64-bits) and
    // then determine the relative location from the offset location.
    return reinterpret_cast<return_type>(
        offset_location + ReadScalar<offset_type>(offset_location));
  }
  static mutable_return_type Read(uint8_t *p, uoffset_t i) {
    p += i * sizeof(uoffset_t);
    return reinterpret_cast<mutable_return_type>(p + ReadScalar<uoffset_t>(p));
  static mutable_return_type Read(uint8_t *const p, const offset_type i) {
    // Offsets are relative to themselves, so first update the pointer to
    // point to the offset location.
    uint8_t *const offset_location = p + i * element_stride;

    // Then read the scalar value of the offset (which may be 32 or 64-bits) and
    // then determine the relative location from the offset location.
    return reinterpret_cast<mutable_return_type>(
        offset_location + ReadScalar<offset_type>(offset_location));
  }
};

// For vector of structs.
template<typename T> struct IndirectHelper<const T *> {
  typedef const T *return_type;
  typedef T *mutable_return_type;
  static const size_t element_stride = sizeof(T);
  static return_type Read(const uint8_t *p, uoffset_t i) {
    return reinterpret_cast<return_type>(p + i * sizeof(T));

  static return_type Read(const uint8_t *const p, const size_t i) {
    // Structs are stored inline, relative to the first struct pointer.
    return reinterpret_cast<return_type>(p + i * element_stride);
  }
  static mutable_return_type Read(uint8_t *p, uoffset_t i) {
    return reinterpret_cast<mutable_return_type>(p + i * sizeof(T));
  static mutable_return_type Read(uint8_t *const p, const size_t i) {
    // Structs are stored inline, relative to the first struct pointer.
    return reinterpret_cast<mutable_return_type>(p + i * element_stride);
  }
};
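The comments above ("offsets are relative to themselves") are the heart of the generalized IndirectHelper. A standalone sketch of the same arithmetic, with the helper name invented for illustration and not part of the commit:

#include <cstddef>
#include <cstdint>
#include <cstring>

// Resolve element `i` of a vector whose elements are OffsetSize-byte
// little-endian offsets, each relative to its own location. This mirrors what
// IndirectHelper<OffsetT<T>>::Read does above.
template<typename OffsetSize>
const uint8_t *ResolveOffsetElement(const uint8_t *vector_data, size_t i) {
  const uint8_t *offset_location = vector_data + i * sizeof(OffsetSize);
  OffsetSize relative = 0;
  std::memcpy(&relative, offset_location, sizeof(relative));  // assumes LE host
  return offset_location + relative;  // target = offset location + stored value
}

Instantiated with uint32_t this matches Offset<T> elements; with uint64_t, Offset64<T> elements.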
@@ -130,23 +173,25 @@ inline bool BufferHasIdentifier(const void *buf, const char *identifier,
/// @cond FLATBUFFERS_INTERNAL
// Helpers to get a typed pointer to the root object contained in the buffer.
template<typename T> T *GetMutableRoot(void *buf) {
  if (!buf) return nullptr;
  EndianCheck();
  return reinterpret_cast<T *>(
      reinterpret_cast<uint8_t *>(buf) +
      EndianScalar(*reinterpret_cast<uoffset_t *>(buf)));
}

template<typename T> T *GetMutableSizePrefixedRoot(void *buf) {
  return GetMutableRoot<T>(reinterpret_cast<uint8_t *>(buf) +
                           sizeof(uoffset_t));
template<typename T, typename SizeT = uoffset_t>
T *GetMutableSizePrefixedRoot(void *buf) {
  return GetMutableRoot<T>(reinterpret_cast<uint8_t *>(buf) + sizeof(SizeT));
}

template<typename T> const T *GetRoot(const void *buf) {
  return GetMutableRoot<T>(const_cast<void *>(buf));
}

template<typename T> const T *GetSizePrefixedRoot(const void *buf) {
  return GetRoot<T>(reinterpret_cast<const uint8_t *>(buf) + sizeof(uoffset_t));
template<typename T, typename SizeT = uoffset_t>
const T *GetSizePrefixedRoot(const void *buf) {
  return GetRoot<T>(reinterpret_cast<const uint8_t *>(buf) + sizeof(SizeT));
}

} // namespace flatbuffers
@ -18,12 +18,15 @@
|
||||
#define FLATBUFFERS_FLATBUFFER_BUILDER_H_
|
||||
|
||||
#include <algorithm>
|
||||
#include <cstdint>
|
||||
#include <functional>
|
||||
#include <initializer_list>
|
||||
#include <type_traits>
|
||||
|
||||
#include "flatbuffers/allocator.h"
|
||||
#include "flatbuffers/array.h"
|
||||
#include "flatbuffers/base.h"
|
||||
#include "flatbuffers/buffer.h"
|
||||
#include "flatbuffers/buffer_ref.h"
|
||||
#include "flatbuffers/default_allocator.h"
|
||||
#include "flatbuffers/detached_buffer.h"
|
||||
@ -40,8 +43,9 @@ namespace flatbuffers {
|
||||
// Converts a Field ID to a virtual table offset.
|
||||
inline voffset_t FieldIndexToOffset(voffset_t field_id) {
|
||||
// Should correspond to what EndTable() below builds up.
|
||||
const voffset_t fixed_fields = 2 * sizeof(voffset_t); // Vtable size and Object Size.
|
||||
return fixed_fields + field_id * sizeof(voffset_t);
|
||||
const voffset_t fixed_fields =
|
||||
2 * sizeof(voffset_t); // Vtable size and Object Size.
|
||||
return fixed_fields + field_id * sizeof(voffset_t);
|
||||
}
|
||||
|
||||
template<typename T, typename Alloc = std::allocator<T>>
|
||||
@@ -68,8 +72,13 @@ T *data(std::vector<T, Alloc> &v) {
/// `PushElement`/`AddElement`/`EndTable`, or the builtin `CreateString`/
/// `CreateVector` functions. Do this is depth-first order to build up a tree to
/// the root. `Finish()` wraps up the buffer ready for transport.
class FlatBufferBuilder {
template<bool Is64Aware = false> class FlatBufferBuilderImpl {
 public:
  // This switches the size type of the builder, based on if its 64-bit aware
  // (uoffset64_t) or not (uoffset_t).
  typedef
      typename std::conditional<Is64Aware, uoffset64_t, uoffset_t>::type SizeT;

  /// @brief Default constructor for FlatBufferBuilder.
  /// @param[in] initial_size The initial size of the buffer, in bytes. Defaults
  /// to `1024`.
@ -81,13 +90,16 @@ class FlatBufferBuilder {
|
||||
/// minimum alignment upon reallocation. Only needed if you intend to store
|
||||
/// types with custom alignment AND you wish to read the buffer in-place
|
||||
/// directly after creation.
|
||||
explicit FlatBufferBuilder(
|
||||
explicit FlatBufferBuilderImpl(
|
||||
size_t initial_size = 1024, Allocator *allocator = nullptr,
|
||||
bool own_allocator = false,
|
||||
size_t buffer_minalign = AlignOf<largest_scalar_t>())
|
||||
: buf_(initial_size, allocator, own_allocator, buffer_minalign),
|
||||
: buf_(initial_size, allocator, own_allocator, buffer_minalign,
|
||||
static_cast<SizeT>(Is64Aware ? FLATBUFFERS_MAX_64_BUFFER_SIZE
|
||||
: FLATBUFFERS_MAX_BUFFER_SIZE)),
|
||||
num_field_loc(0),
|
||||
max_voffset_(0),
|
||||
length_of_64_bit_region_(0),
|
||||
nested(false),
|
||||
finished(false),
|
||||
minalign_(1),
|
||||
@ -98,10 +110,13 @@ class FlatBufferBuilder {
|
||||
}
|
||||
|
||||
/// @brief Move constructor for FlatBufferBuilder.
|
||||
FlatBufferBuilder(FlatBufferBuilder &&other) noexcept
|
||||
: buf_(1024, nullptr, false, AlignOf<largest_scalar_t>()),
|
||||
FlatBufferBuilderImpl(FlatBufferBuilderImpl &&other) noexcept
|
||||
: buf_(1024, nullptr, false, AlignOf<largest_scalar_t>(),
|
||||
static_cast<SizeT>(Is64Aware ? FLATBUFFERS_MAX_64_BUFFER_SIZE
|
||||
: FLATBUFFERS_MAX_BUFFER_SIZE)),
|
||||
num_field_loc(0),
|
||||
max_voffset_(0),
|
||||
length_of_64_bit_region_(0),
|
||||
nested(false),
|
||||
finished(false),
|
||||
minalign_(1),
|
||||
@ -116,18 +131,19 @@ class FlatBufferBuilder {
|
||||
}
|
||||
|
||||
/// @brief Move assignment operator for FlatBufferBuilder.
|
||||
FlatBufferBuilder &operator=(FlatBufferBuilder &&other) noexcept {
|
||||
FlatBufferBuilderImpl &operator=(FlatBufferBuilderImpl &&other) noexcept {
|
||||
// Move construct a temporary and swap idiom
|
||||
FlatBufferBuilder temp(std::move(other));
|
||||
FlatBufferBuilderImpl temp(std::move(other));
|
||||
Swap(temp);
|
||||
return *this;
|
||||
}
|
||||
|
||||
void Swap(FlatBufferBuilder &other) {
|
||||
void Swap(FlatBufferBuilderImpl &other) {
|
||||
using std::swap;
|
||||
buf_.swap(other.buf_);
|
||||
swap(num_field_loc, other.num_field_loc);
|
||||
swap(max_voffset_, other.max_voffset_);
|
||||
swap(length_of_64_bit_region_, other.length_of_64_bit_region_);
|
||||
swap(nested, other.nested);
|
||||
swap(finished, other.finished);
|
||||
swap(minalign_, other.minalign_);
|
||||
@ -136,7 +152,7 @@ class FlatBufferBuilder {
|
||||
swap(string_pool, other.string_pool);
|
||||
}
|
||||
|
||||
~FlatBufferBuilder() {
|
||||
~FlatBufferBuilderImpl() {
|
||||
if (string_pool) delete string_pool;
|
||||
}
|
||||
|
||||
@@ -153,12 +169,36 @@ class FlatBufferBuilder {
    nested = false;
    finished = false;
    minalign_ = 1;
    length_of_64_bit_region_ = 0;
    if (string_pool) string_pool->clear();
  }

  /// @brief The current size of the serialized buffer, counting from the end.
  /// @return Returns an `SizeT` with the current size of the buffer.
  SizeT GetSize() const { return buf_.size(); }

  /// @brief The current size of the serialized buffer relative to the end of
  /// the 32-bit region.
  /// @return Returns an `uoffset_t` with the current size of the buffer.
  uoffset_t GetSize() const { return buf_.size(); }
  template<bool is_64 = Is64Aware>
  // Only enable this method for the 64-bit builder, as only that builder is
  // concerned with the 32/64-bit boundary, and should be the one to bare any
  // run time costs.
  typename std::enable_if<is_64, uoffset_t>::type GetSizeRelative32BitRegion()
      const {
    //[32-bit region][64-bit region]
    //[XXXXXXXXXXXXXXXXXXXXXXXXXXXXX] GetSize()
    //               [YYYYYYYYYYYYYY] length_of_64_bit_region_
    //[ZZZZZZZZZZZZZZ]                return size
    return static_cast<uoffset_t>(GetSize() - length_of_64_bit_region_);
  }

  template<bool is_64 = Is64Aware>
  // Only enable this method for the 32-bit builder.
  typename std::enable_if<!is_64, uoffset_t>::type GetSizeRelative32BitRegion()
      const {
    return static_cast<uoffset_t>(GetSize());
  }

  /// @brief Get the serialized buffer (after you call `Finish()`).
  /// @return Returns an `uint8_t` pointer to the FlatBuffer data inside the
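With invented numbers, the 32-bit-relative size above works out as follows (a sketch, not from the commit):

#include <cassert>
#include <cstdint>

int main() {
  // Invented example: a 120-byte buffer whose most recently written 80 bytes
  // form the 64-bit region (see the diagram above).
  const uint64_t get_size = 120;                // GetSize()
  const uint64_t length_of_64_bit_region = 80;  // length_of_64_bit_region_
  const uint32_t relative =
      static_cast<uint32_t>(get_size - length_of_64_bit_region);
  assert(relative == 40);  // GetSizeRelative32BitRegion(): 32-bit region only
  return 0;
}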
@ -270,14 +310,16 @@ class FlatBufferBuilder {
|
||||
}
|
||||
|
||||
// Write a single aligned scalar to the buffer
|
||||
template<typename T> uoffset_t PushElement(T element) {
|
||||
template<typename T, typename ReturnT = uoffset_t>
|
||||
ReturnT PushElement(T element) {
|
||||
AssertScalarT<T>();
|
||||
Align(sizeof(T));
|
||||
buf_.push_small(EndianScalar(element));
|
||||
return GetSize();
|
||||
return CalculateOffset<ReturnT>();
|
||||
}
|
||||
|
||||
template<typename T> uoffset_t PushElement(Offset<T> off) {
|
||||
template<typename T, template<typename> class OffsetT = Offset>
|
||||
uoffset_t PushElement(OffsetT<T> off) {
|
||||
// Special case for offsets: see ReferTo below.
|
||||
return PushElement(ReferTo(off.o));
|
||||
}
|
||||
@ -307,11 +349,16 @@ class FlatBufferBuilder {
|
||||
AddElement(field, ReferTo(off.o), static_cast<uoffset_t>(0));
|
||||
}
|
||||
|
||||
template<typename T> void AddOffset(voffset_t field, Offset64<T> off) {
|
||||
if (off.IsNull()) return; // Don't store.
|
||||
AddElement(field, ReferTo(off.o), static_cast<uoffset64_t>(0));
|
||||
}
|
||||
|
||||
template<typename T> void AddStruct(voffset_t field, const T *structptr) {
|
||||
if (!structptr) return; // Default, don't store.
|
||||
Align(AlignOf<T>());
|
||||
buf_.push_small(*structptr);
|
||||
TrackField(field, GetSize());
|
||||
TrackField(field, CalculateOffset<uoffset_t>());
|
||||
}
|
||||
|
||||
void AddStructOffset(voffset_t field, uoffset_t off) {
|
||||
@@ -322,12 +369,29 @@ class FlatBufferBuilder {
  // This function converts them to be relative to the current location
  // in the buffer (when stored here), pointing upwards.
  uoffset_t ReferTo(uoffset_t off) {
    // Align to ensure GetSize() below is correct.
    // Align to ensure GetSizeRelative32BitRegion() below is correct.
    Align(sizeof(uoffset_t));
    // Offset must refer to something already in buffer.
    const uoffset_t size = GetSize();
    // 32-bit offsets are relative to the tail of the 32-bit region of the
    // buffer. For most cases (without 64-bit entities) this is equivalent to
    // size of the whole buffer (e.g. GetSize())
    return ReferTo(off, GetSizeRelative32BitRegion());
  }

  uoffset64_t ReferTo(uoffset64_t off) {
    // Align to ensure GetSize() below is correct.
    Align(sizeof(uoffset64_t));
    // 64-bit offsets are relative to tail of the whole buffer
    return ReferTo(off, GetSize());
  }

  template<typename T, typename T2> T ReferTo(const T off, const T2 size) {
    FLATBUFFERS_ASSERT(off && off <= size);
    return size - off + static_cast<uoffset_t>(sizeof(uoffset_t));
    return size - off + static_cast<T>(sizeof(T));
  }

  template<typename T> T ReferTo(const T off, const T size) {
    FLATBUFFERS_ASSERT(off && off <= size);
    return size - off + static_cast<T>(sizeof(T));
  }

  void NotNested() {
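The value ReferTo computes is the forward distance from the offset field being written to the object it refers to. A worked example with made-up positions (not from the commit):

#include <cassert>
#include <cstdint>

int main() {
  // The target object ended when the buffer was 12 bytes long (off == 12),
  // and the 4-byte offset slot is written when the size is 32 bytes.
  const uint32_t off = 12, size = 32;
  const uint32_t stored = size - off + sizeof(uint32_t);  // ReferTo's formula
  // 24 bytes forward from the start of the offset slot lands on the object.
  assert(stored == 24);
  return 0;
}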
@ -349,7 +413,7 @@ class FlatBufferBuilder {
|
||||
uoffset_t StartTable() {
|
||||
NotNested();
|
||||
nested = true;
|
||||
return GetSize();
|
||||
return GetSizeRelative32BitRegion();
|
||||
}
|
||||
|
||||
// This finishes one serialized object by generating the vtable if it's a
|
||||
@ -360,7 +424,9 @@ class FlatBufferBuilder {
|
||||
FLATBUFFERS_ASSERT(nested);
|
||||
// Write the vtable offset, which is the start of any Table.
|
||||
// We fill its value later.
|
||||
const uoffset_t vtableoffsetloc = PushElement<soffset_t>(0);
|
||||
// This is relative to the end of the 32-bit region.
|
||||
const uoffset_t vtable_offset_loc =
|
||||
static_cast<uoffset_t>(PushElement<soffset_t>(0));
|
||||
// Write a vtable, which consists entirely of voffset_t elements.
|
||||
// It starts with the number of offsets, followed by a type id, followed
|
||||
// by the offsets themselves. In reverse:
|
||||
@ -370,7 +436,7 @@ class FlatBufferBuilder {
|
||||
(std::max)(static_cast<voffset_t>(max_voffset_ + sizeof(voffset_t)),
|
||||
FieldIndexToOffset(0));
|
||||
buf_.fill_big(max_voffset_);
|
||||
auto table_object_size = vtableoffsetloc - start;
|
||||
const uoffset_t table_object_size = vtable_offset_loc - start;
|
||||
// Vtable use 16bit offsets.
|
||||
FLATBUFFERS_ASSERT(table_object_size < 0x10000);
|
||||
WriteScalar<voffset_t>(buf_.data() + sizeof(voffset_t),
|
||||
@ -380,7 +446,8 @@ class FlatBufferBuilder {
|
||||
for (auto it = buf_.scratch_end() - num_field_loc * sizeof(FieldLoc);
|
||||
it < buf_.scratch_end(); it += sizeof(FieldLoc)) {
|
||||
auto field_location = reinterpret_cast<FieldLoc *>(it);
|
||||
auto pos = static_cast<voffset_t>(vtableoffsetloc - field_location->off);
|
||||
const voffset_t pos =
|
||||
static_cast<voffset_t>(vtable_offset_loc - field_location->off);
|
||||
// If this asserts, it means you've set a field twice.
|
||||
FLATBUFFERS_ASSERT(
|
||||
!ReadScalar<voffset_t>(buf_.data() + field_location->id));
|
||||
@ -389,7 +456,7 @@ class FlatBufferBuilder {
|
||||
ClearOffsets();
|
||||
auto vt1 = reinterpret_cast<voffset_t *>(buf_.data());
|
||||
auto vt1_size = ReadScalar<voffset_t>(vt1);
|
||||
auto vt_use = GetSize();
|
||||
auto vt_use = GetSizeRelative32BitRegion();
|
||||
// See if we already have generated a vtable with this exact same
|
||||
// layout before. If so, make it point to the old one, remove this one.
|
||||
if (dedup_vtables_) {
|
||||
@ -400,23 +467,24 @@ class FlatBufferBuilder {
|
||||
auto vt2_size = ReadScalar<voffset_t>(vt2);
|
||||
if (vt1_size != vt2_size || 0 != memcmp(vt2, vt1, vt1_size)) continue;
|
||||
vt_use = *vt_offset_ptr;
|
||||
buf_.pop(GetSize() - static_cast<size_t>(vtableoffsetloc));
|
||||
buf_.pop(GetSizeRelative32BitRegion() - vtable_offset_loc);
|
||||
break;
|
||||
}
|
||||
}
|
||||
// If this is a new vtable, remember it.
|
||||
if (vt_use == GetSize()) { buf_.scratch_push_small(vt_use); }
|
||||
if (vt_use == GetSizeRelative32BitRegion()) {
|
||||
buf_.scratch_push_small(vt_use);
|
||||
}
|
||||
// Fill the vtable offset we created above.
|
||||
// The offset points from the beginning of the object to where the
|
||||
// vtable is stored.
|
||||
// The offset points from the beginning of the object to where the vtable is
|
||||
// stored.
|
||||
// Offsets default direction is downward in memory for future format
|
||||
// flexibility (storing all vtables at the start of the file).
|
||||
WriteScalar(buf_.data_at(vtableoffsetloc),
|
||||
WriteScalar(buf_.data_at(vtable_offset_loc + length_of_64_bit_region_),
|
||||
static_cast<soffset_t>(vt_use) -
|
||||
static_cast<soffset_t>(vtableoffsetloc));
|
||||
|
||||
static_cast<soffset_t>(vtable_offset_loc));
|
||||
nested = false;
|
||||
return vtableoffsetloc;
|
||||
return vtable_offset_loc;
|
||||
}
|
||||
|
||||
FLATBUFFERS_ATTRIBUTE([[deprecated("call the version above instead")]])
|
||||
@ -426,14 +494,20 @@ class FlatBufferBuilder {
|
||||
|
||||
// This checks a required field has been set in a given table that has
|
||||
// just been constructed.
|
||||
template<typename T> void Required(Offset<T> table, voffset_t field);
|
||||
template<typename T> void Required(Offset<T> table, voffset_t field) {
|
||||
auto table_ptr = reinterpret_cast<const Table *>(buf_.data_at(table.o));
|
||||
bool ok = table_ptr->GetOptionalFieldOffset(field) != 0;
|
||||
// If this fails, the caller will show what field needs to be set.
|
||||
FLATBUFFERS_ASSERT(ok);
|
||||
(void)ok;
|
||||
}
|
||||
|
||||
uoffset_t StartStruct(size_t alignment) {
|
||||
Align(alignment);
|
||||
return GetSize();
|
||||
return GetSizeRelative32BitRegion();
|
||||
}
|
||||
|
||||
uoffset_t EndStruct() { return GetSize(); }
|
||||
uoffset_t EndStruct() { return GetSizeRelative32BitRegion(); }
|
||||
|
||||
void ClearOffsets() {
|
||||
buf_.scratch_pop(num_field_loc * sizeof(FieldLoc));
|
||||
@ -442,15 +516,18 @@ class FlatBufferBuilder {
|
||||
}
|
||||
|
||||
// Aligns such that when "len" bytes are written, an object can be written
|
||||
// after it with "alignment" without padding.
|
||||
// after it (forward in the buffer) with "alignment" without padding.
|
||||
void PreAlign(size_t len, size_t alignment) {
|
||||
if (len == 0) return;
|
||||
TrackMinAlign(alignment);
|
||||
buf_.fill(PaddingBytes(GetSize() + len, alignment));
|
||||
}
|
||||
template<typename T> void PreAlign(size_t len) {
|
||||
AssertScalarT<T>();
|
||||
PreAlign(len, AlignOf<T>());
|
||||
|
||||
// Aligns such than when "len" bytes are written, an object of type `AlignT`
|
||||
// can be written after it (forward in the buffer) without padding.
|
||||
template<typename AlignT> void PreAlign(size_t len) {
|
||||
AssertScalarT<AlignT>();
|
||||
PreAlign(len, AlignOf<AlignT>());
|
||||
}
|
||||
/// @endcond
|
||||
|
||||
@ -458,34 +535,35 @@ class FlatBufferBuilder {
|
||||
/// @param[in] str A const char pointer to the data to be stored as a string.
|
||||
/// @param[in] len The number of bytes that should be stored from `str`.
|
||||
/// @return Returns the offset in the buffer where the string starts.
|
||||
Offset<String> CreateString(const char *str, size_t len) {
|
||||
NotNested();
|
||||
PreAlign<uoffset_t>(len + 1); // Always 0-terminated.
|
||||
buf_.fill(1);
|
||||
PushBytes(reinterpret_cast<const uint8_t *>(str), len);
|
||||
PushElement(static_cast<uoffset_t>(len));
|
||||
return Offset<String>(GetSize());
|
||||
template<template<typename> class OffsetT = Offset>
|
||||
OffsetT<String> CreateString(const char *str, size_t len) {
|
||||
CreateStringImpl(str, len);
|
||||
return OffsetT<String>(
|
||||
CalculateOffset<typename OffsetT<String>::offset_type>());
|
||||
}
|
||||
|
||||
/// @brief Store a string in the buffer, which is null-terminated.
|
||||
/// @param[in] str A const char pointer to a C-string to add to the buffer.
|
||||
/// @return Returns the offset in the buffer where the string starts.
|
||||
Offset<String> CreateString(const char *str) {
|
||||
return CreateString(str, strlen(str));
|
||||
template<template<typename> class OffsetT = Offset>
|
||||
OffsetT<String> CreateString(const char *str) {
|
||||
return CreateString<OffsetT>(str, strlen(str));
|
||||
}
|
||||
|
||||
/// @brief Store a string in the buffer, which is null-terminated.
|
||||
/// @param[in] str A char pointer to a C-string to add to the buffer.
|
||||
/// @return Returns the offset in the buffer where the string starts.
|
||||
Offset<String> CreateString(char *str) {
|
||||
return CreateString(str, strlen(str));
|
||||
template<template<typename> class OffsetT = Offset>
|
||||
OffsetT<String> CreateString(char *str) {
|
||||
return CreateString<OffsetT>(str, strlen(str));
|
||||
}
|
||||
|
||||
/// @brief Store a string in the buffer, which can contain any binary data.
|
||||
/// @param[in] str A const reference to a std::string to store in the buffer.
|
||||
/// @return Returns the offset in the buffer where the string starts.
|
||||
Offset<String> CreateString(const std::string &str) {
|
||||
return CreateString(str.c_str(), str.length());
|
||||
template<template<typename> class OffsetT = Offset>
|
||||
OffsetT<String> CreateString(const std::string &str) {
|
||||
return CreateString<OffsetT>(str.c_str(), str.length());
|
||||
}
|
||||
|
||||
// clang-format off
|
||||
@ -493,8 +571,9 @@ class FlatBufferBuilder {
|
||||
/// @brief Store a string in the buffer, which can contain any binary data.
|
||||
/// @param[in] str A const string_view to copy in to the buffer.
|
||||
/// @return Returns the offset in the buffer where the string starts.
|
||||
Offset<String> CreateString(flatbuffers::string_view str) {
|
||||
return CreateString(str.data(), str.size());
|
||||
template<template <typename> class OffsetT = Offset>
|
||||
OffsetT<String>CreateString(flatbuffers::string_view str) {
|
||||
return CreateString<OffsetT>(str.data(), str.size());
|
||||
}
|
||||
#endif // FLATBUFFERS_HAS_STRING_VIEW
|
||||
// clang-format on
|
||||
@ -502,16 +581,21 @@ class FlatBufferBuilder {
|
||||
/// @brief Store a string in the buffer, which can contain any binary data.
|
||||
/// @param[in] str A const pointer to a `String` struct to add to the buffer.
|
||||
/// @return Returns the offset in the buffer where the string starts
|
||||
Offset<String> CreateString(const String *str) {
|
||||
return str ? CreateString(str->c_str(), str->size()) : 0;
|
||||
template<template<typename> class OffsetT = Offset>
|
||||
OffsetT<String> CreateString(const String *str) {
|
||||
return str ? CreateString<OffsetT>(str->c_str(), str->size()) : 0;
|
||||
}
|
||||
|
||||
/// @brief Store a string in the buffer, which can contain any binary data.
|
||||
/// @param[in] str A const reference to a std::string like type with support
|
||||
/// of T::c_str() and T::length() to store in the buffer.
|
||||
/// @return Returns the offset in the buffer where the string starts.
|
||||
template<typename T> Offset<String> CreateString(const T &str) {
|
||||
return CreateString(str.c_str(), str.length());
|
||||
template<template<typename> class OffsetT = Offset,
|
||||
// No need to explicitly declare the T type, let the compiler deduce
|
||||
// it.
|
||||
int &...ExplicitArgumentBarrier, typename T>
|
||||
OffsetT<String> CreateString(const T &str) {
|
||||
return CreateString<OffsetT>(str.c_str(), str.length());
|
||||
}
|
||||
|
||||
/// @brief Store a string in the buffer, which can contain any binary data.
|
||||
@ -523,12 +607,14 @@ class FlatBufferBuilder {
|
||||
/// @return Returns the offset in the buffer where the string starts.
|
||||
Offset<String> CreateSharedString(const char *str, size_t len) {
|
||||
FLATBUFFERS_ASSERT(FLATBUFFERS_GENERAL_HEAP_ALLOC_OK);
|
||||
if (!string_pool)
|
||||
if (!string_pool) {
|
||||
string_pool = new StringOffsetMap(StringOffsetCompare(buf_));
|
||||
}
|
||||
|
||||
const size_t size_before_string = buf_.size();
|
||||
// Must first serialize the string, since the set is all offsets into
|
||||
// buffer.
|
||||
auto off = CreateString(str, len);
|
||||
const Offset<String> off = CreateString<Offset>(str, len);
|
||||
auto it = string_pool->find(off);
|
||||
// If it exists we reuse existing serialized data!
|
||||
if (it != string_pool->end()) {
|
||||
@ -584,21 +670,27 @@ class FlatBufferBuilder {
|
||||
}
|
||||
|
||||
/// @cond FLATBUFFERS_INTERNAL
|
||||
uoffset_t EndVector(size_t len) {
|
||||
template<typename LenT = uoffset_t, typename ReturnT = uoffset_t>
|
||||
ReturnT EndVector(size_t len) {
|
||||
FLATBUFFERS_ASSERT(nested); // Hit if no corresponding StartVector.
|
||||
nested = false;
|
||||
return PushElement(static_cast<uoffset_t>(len));
|
||||
return PushElement<LenT, ReturnT>(static_cast<LenT>(len));
|
||||
}
|
||||
|
||||
template<template<typename> class OffsetT = Offset, typename LenT = uint32_t>
|
||||
void StartVector(size_t len, size_t elemsize, size_t alignment) {
|
||||
NotNested();
|
||||
nested = true;
|
||||
PreAlign<uoffset_t>(len * elemsize);
|
||||
// Align to the Length type of the vector (either 32-bit or 64-bit), so
|
||||
// that the length of the buffer can be added without padding.
|
||||
PreAlign<LenT>(len * elemsize);
|
||||
PreAlign(len * elemsize, alignment); // Just in case elemsize > uoffset_t.
|
||||
}
|
||||
|
||||
template<typename T> void StartVector(size_t len) {
|
||||
return StartVector(len, sizeof(T), AlignOf<T>());
|
||||
template<typename T, template<typename> class OffsetT = Offset,
|
||||
typename LenT = uint32_t>
|
||||
void StartVector(size_t len) {
|
||||
return StartVector<OffsetT, LenT>(len, sizeof(T), AlignOf<T>());
|
||||
}
|
||||
|
||||
// Call this right before StartVector/CreateVector if you want to force the
|
||||
@ -623,31 +715,40 @@ class FlatBufferBuilder {
|
||||
|
||||
/// @brief Serialize an array into a FlatBuffer `vector`.
|
||||
/// @tparam T The data type of the array elements.
|
||||
/// @tparam OffsetT the type of offset to return
|
||||
/// @tparam VectorT the type of vector to cast to.
|
||||
/// @param[in] v A pointer to the array of type `T` to serialize into the
|
||||
/// buffer as a `vector`.
|
||||
/// @param[in] len The number of elements to serialize.
|
||||
/// @return Returns a typed `Offset` into the serialized data indicating
|
||||
/// @return Returns a typed `TOffset` into the serialized data indicating
|
||||
/// where the vector is stored.
|
||||
template<typename T> Offset<Vector<T>> CreateVector(const T *v, size_t len) {
|
||||
template<template<typename...> class OffsetT = Offset,
|
||||
template<typename...> class VectorT = Vector,
|
||||
int &...ExplicitArgumentBarrier, typename T>
|
||||
OffsetT<VectorT<T>> CreateVector(const T *v, size_t len) {
|
||||
// The type of the length field in the vector.
|
||||
typedef typename VectorT<T>::size_type LenT;
|
||||
typedef typename OffsetT<VectorT<T>>::offset_type offset_type;
|
||||
// If this assert hits, you're specifying a template argument that is
|
||||
// causing the wrong overload to be selected, remove it.
|
||||
AssertScalarT<T>();
|
||||
StartVector<T>(len);
|
||||
if (len == 0) { return Offset<Vector<T>>(EndVector(len)); }
|
||||
// clang-format off
|
||||
#if FLATBUFFERS_LITTLEENDIAN
|
||||
PushBytes(reinterpret_cast<const uint8_t *>(v), len * sizeof(T));
|
||||
#else
|
||||
if (sizeof(T) == 1) {
|
||||
PushBytes(reinterpret_cast<const uint8_t *>(v), len);
|
||||
} else {
|
||||
for (auto i = len; i > 0; ) {
|
||||
PushElement(v[--i]);
|
||||
StartVector<T, OffsetT, LenT>(len);
|
||||
if (len > 0) {
|
||||
// clang-format off
|
||||
#if FLATBUFFERS_LITTLEENDIAN
|
||||
PushBytes(reinterpret_cast<const uint8_t *>(v), len * sizeof(T));
|
||||
#else
|
||||
if (sizeof(T) == 1) {
|
||||
PushBytes(reinterpret_cast<const uint8_t *>(v), len);
|
||||
} else {
|
||||
for (auto i = len; i > 0; ) {
|
||||
PushElement(v[--i]);
|
||||
}
|
||||
}
|
||||
}
|
||||
#endif
|
||||
// clang-format on
|
||||
return Offset<Vector<T>>(EndVector(len));
|
||||
#endif
|
||||
// clang-format on
|
||||
}
|
||||
return OffsetT<VectorT<T>>(EndVector<LenT, offset_type>(len));
|
||||
}
|
||||
|
||||
/// @brief Serialize an array like object into a FlatBuffer `vector`.
|
||||
@ -689,6 +790,12 @@ class FlatBufferBuilder {
|
||||
return CreateVector(data(v), v.size());
|
||||
}
|
||||
|
||||
template<template<typename...> class VectorT = Vector64,
|
||||
int &...ExplicitArgumentBarrier, typename T>
|
||||
Offset64<VectorT<T>> CreateVector64(const std::vector<T> &v) {
|
||||
return CreateVector<Offset64, VectorT>(data(v), v.size());
|
||||
}
|
||||
|
||||
// vector<bool> may be implemented using a bit-set, so we can't access it as
|
||||
// an array. Instead, read elements manually.
|
||||
// Background: https://isocpp.org/blog/2012/11/on-vectorbool
|
||||
@ -785,47 +892,19 @@ class FlatBufferBuilder {
|
||||
/// @param[in] len The number of elements to serialize.
|
||||
/// @return Returns a typed `Offset` into the serialized data indicating
|
||||
/// where the vector is stored.
|
||||
template<typename T>
|
||||
Offset<Vector<const T *>> CreateVectorOfStructs(const T *v, size_t len) {
|
||||
StartVector(len * sizeof(T) / AlignOf<T>(), sizeof(T), AlignOf<T>());
|
||||
template<typename T, template<typename...> class OffsetT = Offset,
|
||||
template<typename...> class VectorT = Vector>
|
||||
OffsetT<VectorT<const T *>> CreateVectorOfStructs(const T *v, size_t len) {
|
||||
// The type of the length field in the vector.
|
||||
typedef typename VectorT<T>::size_type LenT;
|
||||
typedef typename OffsetT<VectorT<const T *>>::offset_type offset_type;
|
||||
|
||||
StartVector<OffsetT, LenT>(len * sizeof(T) / AlignOf<T>(), sizeof(T),
|
||||
AlignOf<T>());
|
||||
if (len > 0) {
|
||||
PushBytes(reinterpret_cast<const uint8_t *>(v), sizeof(T) * len);
|
||||
}
|
||||
return Offset<Vector<const T *>>(EndVector(len));
|
||||
}
|
||||
|
||||
/// @brief Serialize an array of native structs into a FlatBuffer `vector`.
|
||||
/// @tparam T The data type of the struct array elements.
|
||||
/// @tparam S The data type of the native struct array elements.
|
||||
/// @param[in] v A pointer to the array of type `S` to serialize into the
|
||||
/// buffer as a `vector`.
|
||||
/// @param[in] len The number of elements to serialize.
|
||||
/// @param[in] pack_func Pointer to a function to convert the native struct
|
||||
/// to the FlatBuffer struct.
|
||||
/// @return Returns a typed `Offset` into the serialized data indicating
|
||||
/// where the vector is stored.
|
||||
template<typename T, typename S>
|
||||
Offset<Vector<const T *>> CreateVectorOfNativeStructs(
|
||||
const S *v, size_t len, T (*const pack_func)(const S &)) {
|
||||
FLATBUFFERS_ASSERT(pack_func);
|
||||
auto structs = StartVectorOfStructs<T>(len);
|
||||
for (size_t i = 0; i < len; i++) { structs[i] = pack_func(v[i]); }
|
||||
return EndVectorOfStructs<T>(len);
|
||||
}
|
||||
|
||||
/// @brief Serialize an array of native structs into a FlatBuffer `vector`.
|
||||
/// @tparam T The data type of the struct array elements.
|
||||
/// @tparam S The data type of the native struct array elements.
|
||||
/// @param[in] v A pointer to the array of type `S` to serialize into the
|
||||
/// buffer as a `vector`.
|
||||
/// @param[in] len The number of elements to serialize.
|
||||
/// @return Returns a typed `Offset` into the serialized data indicating
|
||||
/// where the vector is stored.
|
||||
template<typename T, typename S>
|
||||
Offset<Vector<const T *>> CreateVectorOfNativeStructs(const S *v,
|
||||
size_t len) {
|
||||
extern T Pack(const S &);
|
||||
return CreateVectorOfNativeStructs(v, len, Pack);
|
||||
return OffsetT<VectorT<const T *>>(EndVector<LenT, offset_type>(len));
|
||||
}
|
||||
|
||||
/// @brief Serialize an array of structs into a FlatBuffer `vector`.
|
||||
@ -873,10 +952,52 @@ class FlatBufferBuilder {
|
||||
/// serialize into the buffer as a `vector`.
|
||||
/// @return Returns a typed `Offset` into the serialized data indicating
|
||||
/// where the vector is stored.
|
||||
template<typename T, typename Alloc = std::allocator<T>>
|
||||
Offset<Vector<const T *>> CreateVectorOfStructs(
|
||||
template<typename T, template<typename...> class OffsetT = Offset,
|
||||
template<typename...> class VectorT = Vector,
|
||||
typename Alloc = std::allocator<T>>
|
||||
OffsetT<VectorT<const T *>> CreateVectorOfStructs(
|
||||
const std::vector<T, Alloc> &v) {
|
||||
return CreateVectorOfStructs(data(v), v.size());
|
||||
return CreateVectorOfStructs<T, OffsetT, VectorT>(data(v), v.size());
|
||||
}
|
||||
|
||||
template<template<typename...> class VectorT = Vector64, int &..., typename T>
|
||||
Offset64<VectorT<const T *>> CreateVectorOfStructs64(
|
||||
const std::vector<T> &v) {
|
||||
return CreateVectorOfStructs<T, Offset64, VectorT>(data(v), v.size());
|
||||
}
|
||||
|
||||
/// @brief Serialize an array of native structs into a FlatBuffer `vector`.
|
||||
/// @tparam T The data type of the struct array elements.
|
||||
/// @tparam S The data type of the native struct array elements.
|
||||
/// @param[in] v A pointer to the array of type `S` to serialize into the
|
||||
/// buffer as a `vector`.
|
||||
/// @param[in] len The number of elements to serialize.
|
||||
/// @param[in] pack_func Pointer to a function to convert the native struct
|
||||
/// to the FlatBuffer struct.
|
||||
/// @return Returns a typed `Offset` into the serialized data indicating
|
||||
/// where the vector is stored.
|
||||
template<typename T, typename S>
|
||||
Offset<Vector<const T *>> CreateVectorOfNativeStructs(
|
||||
const S *v, size_t len, T (*const pack_func)(const S &)) {
|
||||
FLATBUFFERS_ASSERT(pack_func);
|
||||
auto structs = StartVectorOfStructs<T>(len);
|
||||
for (size_t i = 0; i < len; i++) { structs[i] = pack_func(v[i]); }
|
||||
return EndVectorOfStructs<T>(len);
|
||||
}
|
||||
|
||||
/// @brief Serialize an array of native structs into a FlatBuffer `vector`.
|
||||
/// @tparam T The data type of the struct array elements.
|
||||
/// @tparam S The data type of the native struct array elements.
|
||||
/// @param[in] v A pointer to the array of type `S` to serialize into the
|
||||
/// buffer as a `vector`.
|
||||
/// @param[in] len The number of elements to serialize.
|
||||
/// @return Returns a typed `Offset` into the serialized data indicating
|
||||
/// where the vector is stored.
|
||||
template<typename T, typename S>
|
||||
Offset<Vector<const T *>> CreateVectorOfNativeStructs(const S *v,
|
||||
size_t len) {
|
||||
extern T Pack(const S &);
|
||||
return CreateVectorOfNativeStructs(v, len, Pack);
|
||||
}
|
||||
|
||||
/// @brief Serialize a `std::vector` of native structs into a FlatBuffer
|
||||
@ -979,14 +1100,14 @@ class FlatBufferBuilder {
|
||||
|
||||
/// @cond FLATBUFFERS_INTERNAL
|
||||
template<typename T> struct TableKeyComparator {
|
||||
TableKeyComparator(vector_downward &buf) : buf_(buf) {}
|
||||
explicit TableKeyComparator(vector_downward<SizeT> &buf) : buf_(buf) {}
|
||||
TableKeyComparator(const TableKeyComparator &other) : buf_(other.buf_) {}
|
||||
bool operator()(const Offset<T> &a, const Offset<T> &b) const {
|
||||
auto table_a = reinterpret_cast<T *>(buf_.data_at(a.o));
|
||||
auto table_b = reinterpret_cast<T *>(buf_.data_at(b.o));
|
||||
return table_a->KeyCompareLessThan(table_b);
|
||||
}
|
||||
vector_downward &buf_;
|
||||
vector_downward<SizeT> &buf_;
|
||||
|
||||
private:
|
||||
FLATBUFFERS_DELETE_FUNC(
|
||||
@ -1034,7 +1155,7 @@ class FlatBufferBuilder {
|
||||
NotNested();
|
||||
StartVector(len, elemsize, alignment);
|
||||
buf_.make_space(len * elemsize);
|
||||
auto vec_start = GetSize();
|
||||
const uoffset_t vec_start = GetSizeRelative32BitRegion();
|
||||
auto vec_end = EndVector(len);
|
||||
*buf = buf_.data_at(vec_start);
|
||||
return vec_end;
|
||||
@ -1085,7 +1206,8 @@ class FlatBufferBuilder {
|
||||
NotNested();
|
||||
Align(AlignOf<T>());
|
||||
buf_.push_small(structobj);
|
||||
return Offset<const T *>(GetSize());
|
||||
return Offset<const T *>(
|
||||
CalculateOffset<typename Offset<const T *>::offset_type>());
|
||||
}
|
||||
|
||||
/// @brief Finish serializing a buffer by writing the root offset.
|
||||
@ -1109,7 +1231,7 @@ class FlatBufferBuilder {
|
||||
Finish(root.o, file_identifier, true);
|
||||
}
|
||||
|
||||
void SwapBufAllocator(FlatBufferBuilder &other) {
|
||||
void SwapBufAllocator(FlatBufferBuilderImpl &other) {
|
||||
buf_.swap_allocator(other.buf_);
|
||||
}
|
||||
|
||||
@ -1119,16 +1241,23 @@ class FlatBufferBuilder {
|
||||
|
||||
protected:
|
||||
// You shouldn't really be copying instances of this class.
|
||||
FlatBufferBuilder(const FlatBufferBuilder &);
|
||||
FlatBufferBuilder &operator=(const FlatBufferBuilder &);
|
||||
FlatBufferBuilderImpl(const FlatBufferBuilderImpl &);
|
||||
FlatBufferBuilderImpl &operator=(const FlatBufferBuilderImpl &);
|
||||
|
||||
void Finish(uoffset_t root, const char *file_identifier, bool size_prefix) {
|
||||
NotNested();
|
||||
buf_.clear_scratch();
|
||||
|
||||
const size_t prefix_size = size_prefix ? sizeof(SizeT) : 0;
|
||||
// Make sure we track the alignment of the size prefix.
|
||||
TrackMinAlign(prefix_size);
|
||||
|
||||
const size_t root_offset_size = sizeof(uoffset_t);
|
||||
const size_t file_id_size = file_identifier ? kFileIdentifierLength : 0;
|
||||
|
||||
// This will cause the whole buffer to be aligned.
|
||||
PreAlign((size_prefix ? sizeof(uoffset_t) : 0) + sizeof(uoffset_t) +
|
||||
(file_identifier ? kFileIdentifierLength : 0),
|
||||
minalign_);
|
||||
PreAlign(prefix_size + root_offset_size + file_id_size, minalign_);
|
||||
|
||||
if (file_identifier) {
|
||||
FLATBUFFERS_ASSERT(strlen(file_identifier) == kFileIdentifierLength);
|
||||
PushBytes(reinterpret_cast<const uint8_t *>(file_identifier),
|
||||
@ -1144,7 +1273,7 @@ class FlatBufferBuilder {
|
||||
voffset_t id;
|
||||
};
|
||||
|
||||
vector_downward buf_;
|
||||
vector_downward<SizeT> buf_;
|
||||
|
||||
// Accumulating offsets of table members while it is being built.
|
||||
// We store these in the scratch pad of buf_, after the vtable offsets.
|
||||
@@ -1153,6 +1282,31 @@ class FlatBufferBuilder {
  // possible vtable.
  voffset_t max_voffset_;

  // This is the length of the 64-bit region of the buffer. The buffer supports
  // 64-bit offsets by forcing serialization of those elements in the "tail"
  // region of the buffer (i.e. "64-bit region"). To properly keep track of
  // offsets that are referenced from the tail of the buffer to not overflow
  // their size (e.g. Offset is a uint32_t type), the boundary of the 32-/64-bit
  // regions must be tracked.
  //
  // [   Complete FlatBuffer      ]
  // [32-bit region][64-bit region]
  //                ^              ^
  //                |              Tail of the buffer.
  //                |
  //                Tail of the 32-bit region of the buffer.
  //
  // This keeps track of the size of the 64-bit region so that the tail of the
  // 32-bit region can be calculated as `GetSize() - length_of_64_bit_region_`.
  //
  // This will remain 0 if no 64-bit offset types are added to the buffer.
  size_t length_of_64_bit_region_;

  // When true, 64-bit offsets can still be added to the builder. When false,
  // only 32-bit offsets can be added, and attempts to add a 64-bit offset will
  // raise an assertion. This is typically a compile-time error in ordering the
  // serialization of 64-bit offset fields not at the tail of the buffer.

  // Ensure objects are not nested.
  bool nested;
@ -1166,14 +1320,15 @@ class FlatBufferBuilder {
|
||||
bool dedup_vtables_;
|
||||
|
||||
struct StringOffsetCompare {
|
||||
StringOffsetCompare(const vector_downward &buf) : buf_(&buf) {}
|
||||
explicit StringOffsetCompare(const vector_downward<SizeT> &buf)
|
||||
: buf_(&buf) {}
|
||||
bool operator()(const Offset<String> &a, const Offset<String> &b) const {
|
||||
auto stra = reinterpret_cast<const String *>(buf_->data_at(a.o));
|
||||
auto strb = reinterpret_cast<const String *>(buf_->data_at(b.o));
|
||||
return StringLessThan(stra->data(), stra->size(), strb->data(),
|
||||
strb->size());
|
||||
}
|
||||
const vector_downward *buf_;
|
||||
const vector_downward<SizeT> *buf_;
|
||||
};
|
||||
|
||||
// For use with CreateSharedString. Instantiated on first use only.
|
||||
@@ -1181,23 +1336,122 @@ class FlatBufferBuilder {
  StringOffsetMap *string_pool;

 private:
  void CanAddOffset64() {
    // If you hit this assertion, you are attempting to add a 64-bit offset to
    // a 32-bit only builder. This is because the builder has overloads that
    // differ only on the offset size returned: e.g.:
    //
    //   FlatBufferBuilder builder;
    //   Offset64<String> string_offset = builder.CreateString<Offset64>();
    //
    // Either use a 64-bit aware builder, or don't try to create an Offset64
    // return type.
    //
    // TODO(derekbailey): we can probably do more enable_if to avoid this
    // looking like its possible to the user.
    static_assert(Is64Aware, "cannot add 64-bit offset to a 32-bit builder");

    // If you hit this assertion, you are attempting to add an 64-bit offset
    // item after already serializing a 32-bit item. All 64-bit offsets have to
    // added to the tail of the buffer before any 32-bit items can be added.
    // Otherwise some items might not be addressable due to the maximum range of
    // the 32-bit offset.
    FLATBUFFERS_ASSERT(GetSize() == length_of_64_bit_region_);
  }
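The run-time half of this check is purely about ordering. A sketch of what it permits and what would trip the assertion (illustrative, not from the commit):

#include <cstdint>
#include <vector>

#include "flatbuffers/flatbuffers.h"

inline void Offset64OrderingExample() {
  flatbuffers::FlatBufferBuilder64 builder;
  // OK: 64-bit offset items first, so the whole buffer so far is the 64-bit
  // region and GetSize() == length_of_64_bit_region_ still holds.
  auto big = builder.CreateVector64(std::vector<uint8_t>(16, 0));
  auto far_string = builder.CreateString<flatbuffers::Offset64>("far");
  // OK: 32-bit offsets afterwards.
  auto near_string = builder.CreateString("near");
  // Would trip the assertion: a 64-bit offset after 32-bit data exists.
  // auto too_late = builder.CreateString<flatbuffers::Offset64>("late");
  (void)big; (void)far_string; (void)near_string;
}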
||||
|
||||
/// @brief Store a string in the buffer, which can contain any binary data.
|
||||
/// @param[in] str A const char pointer to the data to be stored as a string.
|
||||
/// @param[in] len The number of bytes that should be stored from `str`.
|
||||
/// @return Returns the offset in the buffer where the string starts.
|
||||
void CreateStringImpl(const char *str, size_t len) {
|
||||
NotNested();
|
||||
PreAlign<uoffset_t>(len + 1); // Always 0-terminated.
|
||||
buf_.fill(1);
|
||||
PushBytes(reinterpret_cast<const uint8_t *>(str), len);
|
||||
PushElement(static_cast<uoffset_t>(len));
|
||||
}
|
||||
|
||||
// Allocates space for a vector of structures.
|
||||
// Must be completed with EndVectorOfStructs().
|
||||
template<typename T> T *StartVectorOfStructs(size_t vector_size) {
|
||||
StartVector(vector_size * sizeof(T) / AlignOf<T>(), sizeof(T),
|
||||
AlignOf<T>());
|
||||
template<typename T, template<typename> class OffsetT = Offset>
|
||||
T *StartVectorOfStructs(size_t vector_size) {
|
||||
StartVector<OffsetT>(vector_size * sizeof(T) / AlignOf<T>(), sizeof(T),
|
||||
AlignOf<T>());
|
||||
return reinterpret_cast<T *>(buf_.make_space(vector_size * sizeof(T)));
|
||||
}
|
||||
|
||||
// End the vector of structures in the flatbuffers.
|
||||
// Vector should have previously be started with StartVectorOfStructs().
|
||||
template<typename T, template<typename> class OffsetT = Offset>
|
||||
OffsetT<Vector<const T *>> EndVectorOfStructs(size_t vector_size) {
|
||||
return OffsetT<Vector<const T *>>(
|
||||
EndVector<typename Vector<const T *>::size_type,
|
||||
typename OffsetT<Vector<const T *>>::offset_type>(
|
||||
vector_size));
|
||||
}
|
||||
|
||||
template<typename T>
|
||||
Offset<Vector<const T *>> EndVectorOfStructs(size_t vector_size) {
|
||||
return Offset<Vector<const T *>>(EndVector(vector_size));
|
||||
typename std::enable_if<std::is_same<T, uoffset_t>::value, T>::type
|
||||
CalculateOffset() {
|
||||
// Default to the end of the 32-bit region. This may or may not be the end
|
||||
// of the buffer, depending on if any 64-bit offsets have been added.
|
||||
return GetSizeRelative32BitRegion();
|
||||
}
|
||||
|
||||
// Specializations to handle the 64-bit CalculateOffset, which is relative to
|
||||
// end of the buffer.
|
||||
template<typename T>
|
||||
typename std::enable_if<std::is_same<T, uoffset64_t>::value, T>::type
|
||||
CalculateOffset() {
|
||||
// This should never be compiled in when not using a 64-bit builder.
|
||||
static_assert(Is64Aware, "invalid 64-bit offset in 32-bit builder");
|
||||
|
||||
// Store how big the 64-bit region of the buffer is, so we can determine
|
||||
// where the 32/64 bit boundary is.
|
||||
length_of_64_bit_region_ = GetSize();
|
||||
|
||||
return length_of_64_bit_region_;
|
||||
}
|
||||
};
|
||||
/// @}
|
||||
|
||||
// Hack to `FlatBufferBuilder` mean `FlatBufferBuilder<false>` or
|
||||
// `FlatBufferBuilder<>`, where the template < > syntax is required.
|
||||
typedef FlatBufferBuilderImpl<false> FlatBufferBuilder;
|
||||
typedef FlatBufferBuilderImpl<true> FlatBufferBuilder64;
|
||||
|
||||
// These are external due to GCC not allowing them in the class.
|
||||
// See: https://stackoverflow.com/q/8061456/868247
|
||||
template<>
|
||||
template<>
|
||||
inline Offset64<String> FlatBufferBuilder64::CreateString(const char *str,
|
||||
size_t len) {
|
||||
CanAddOffset64();
|
||||
CreateStringImpl(str, len);
|
||||
return Offset64<String>(
|
||||
CalculateOffset<typename Offset64<String>::offset_type>());
|
||||
}
|
||||
|
||||
// Used to distinguish from real Offsets.
|
||||
template<typename T = void> struct EmptyOffset {};
|
||||
|
||||
// TODO(derekbailey): it would be nice to combine these two methods.
|
||||
template<>
|
||||
template<>
|
||||
inline void FlatBufferBuilder64::StartVector<Offset64, uint32_t>(
|
||||
size_t len, size_t elemsize, size_t alignment) {
|
||||
CanAddOffset64();
|
||||
StartVector<EmptyOffset, uint32_t>(len, elemsize, alignment);
|
||||
}
|
||||
|
||||
template<>
|
||||
template<>
|
||||
inline void FlatBufferBuilder64::StartVector<Offset64, uint64_t>(
|
||||
size_t len, size_t elemsize, size_t alignment) {
|
||||
CanAddOffset64();
|
||||
StartVector<EmptyOffset, uint64_t>(len, elemsize, alignment);
|
||||
}
|
||||
|
||||
/// Helpers to get a typed pointer to objects that are currently being built.
|
||||
/// @warning Creating new objects will lead to reallocations and invalidates
|
||||
/// the pointer!
|
||||
@ -1212,15 +1466,6 @@ const T *GetTemporaryPointer(FlatBufferBuilder &fbb, Offset<T> offset) {
|
||||
return GetMutableTemporaryPointer<T>(fbb, offset);
|
||||
}
|
||||
|
||||
template<typename T>
|
||||
void FlatBufferBuilder::Required(Offset<T> table, voffset_t field) {
|
||||
auto table_ptr = reinterpret_cast<const Table *>(buf_.data_at(table.o));
|
||||
bool ok = table_ptr->GetOptionalFieldOffset(field) != 0;
|
||||
// If this fails, the caller will show what field needs to be set.
|
||||
FLATBUFFERS_ASSERT(ok);
|
||||
(void)ok;
|
||||
}
|
||||
|
||||
} // namespace flatbuffers
|
||||
|
||||
#endif // FLATBUFFERS_VECTOR_DOWNWARD_H_
|
||||
#endif // FLATBUFFERS_FLATBUFFER_BUILDER_H_
|
||||
|
@@ -76,8 +76,9 @@ inline const uint8_t *GetBufferStartFromRootPointer(const void *root) {
}

/// @brief This return the prefixed size of a FlatBuffer.
inline uoffset_t GetPrefixedSize(const uint8_t *buf) {
  return ReadScalar<uoffset_t>(buf);
template<typename SizeT = uoffset_t>
inline SizeT GetPrefixedSize(const uint8_t *buf) {
  return ReadScalar<SizeT>(buf);
}

// Base class for native objects (FlatBuffer data de-serialized into native
@ -45,26 +45,27 @@ namespace flatbuffers {
// of type tokens.
// clang-format off
#define FLATBUFFERS_GEN_TYPES_SCALAR(TD) \
  TD(NONE, "", uint8_t, byte, byte, byte, uint8, u8, UByte, UInt8, 0) \
  TD(UTYPE, "", uint8_t, byte, byte, byte, uint8, u8, UByte, UInt8, 1) /* begin scalar/int */ \
  TD(BOOL, "bool", uint8_t, boolean,bool, bool, bool, bool, Boolean, Bool, 2) \
  TD(CHAR, "byte", int8_t, byte, int8, sbyte, int8, i8, Byte, Int8, 3) \
  TD(UCHAR, "ubyte", uint8_t, byte, byte, byte, uint8, u8, UByte, UInt8, 4) \
  TD(SHORT, "short", int16_t, short, int16, short, int16, i16, Short, Int16, 5) \
  TD(USHORT, "ushort", uint16_t, short, uint16, ushort, uint16, u16, UShort, UInt16, 6) \
  TD(INT, "int", int32_t, int, int32, int, int32, i32, Int, Int32, 7) \
  TD(UINT, "uint", uint32_t, int, uint32, uint, uint32, u32, UInt, UInt32, 8) \
  TD(LONG, "long", int64_t, long, int64, long, int64, i64, Long, Int64, 9) \
  TD(ULONG, "ulong", uint64_t, long, uint64, ulong, uint64, u64, ULong, UInt64, 10) /* end int */ \
  TD(FLOAT, "float", float, float, float32, float, float32, f32, Float, Float32, 11) /* begin float */ \
  TD(DOUBLE, "double", double, double, float64, double, float64, f64, Double, Double, 12) /* end float/scalar */
#define FLATBUFFERS_GEN_TYPES_POINTER(TD) \
  TD(STRING, "string", Offset<void>, int, int, StringOffset, int, unused, Int, Offset<String>, 13) \
  TD(VECTOR, "", Offset<void>, int, int, VectorOffset, int, unused, Int, Offset<UOffset>, 14) \
  TD(STRUCT, "", Offset<void>, int, int, int, int, unused, Int, Offset<UOffset>, 15) \
  TD(UNION, "", Offset<void>, int, int, int, int, unused, Int, Offset<UOffset>, 16)
  TD(STRING, "string", Offset<void>, int, int, StringOffset, int, unused, Int, Offset<String>, 13) \
  TD(VECTOR, "", Offset<void>, int, int, VectorOffset, int, unused, Int, Offset<UOffset>, 14) \
  TD(VECTOR64, "", Offset64<void>, int, int, VectorOffset, int, unused, Int, Offset<UOffset>, 18) \
  TD(STRUCT, "", Offset<void>, int, int, int, int, unused, Int, Offset<UOffset>, 15) \
  TD(UNION, "", Offset<void>, int, int, int, int, unused, Int, Offset<UOffset>, 16)
#define FLATBUFFERS_GEN_TYPE_ARRAY(TD) \
  TD(ARRAY, "", int, int, int, int, int, unused, Int, Offset<UOffset>, 17)
// The fields are:
// - enum
// - FlatBuffers schema type.

@ -139,6 +140,8 @@ inline bool IsLong (BaseType t) { return t == BASE_TYPE_LONG ||
inline bool IsBool (BaseType t) { return t == BASE_TYPE_BOOL; }
inline bool IsOneByte(BaseType t) { return t >= BASE_TYPE_UTYPE &&
                                           t <= BASE_TYPE_UCHAR; }
inline bool IsVector (BaseType t) { return t == BASE_TYPE_VECTOR ||
                                           t == BASE_TYPE_VECTOR64; }

inline bool IsUnsigned(BaseType t) {
  return (t == BASE_TYPE_UTYPE) || (t == BASE_TYPE_UCHAR) ||

@ -210,7 +213,8 @@ struct Type {
  bool Deserialize(const Parser &parser, const reflection::Type *type);

  BaseType base_type;
  BaseType element;       // only set if t == BASE_TYPE_VECTOR
  BaseType element;       // only set if t == BASE_TYPE_VECTOR or
                          // BASE_TYPE_VECTOR64
  StructDef *struct_def;  // only set if t or element == BASE_TYPE_STRUCT
  EnumDef *enum_def;      // set if t == BASE_TYPE_UNION / BASE_TYPE_UTYPE,
                          // or for an integral type derived from an enum.

@ -326,6 +330,7 @@ struct FieldDef : public Definition {
  shared(false),
  native_inline(false),
  flexbuffer(false),
  offset64(false),
  presence(kDefault),
  nested_flatbuffer(nullptr),
  padding(0),

@ -352,6 +357,7 @@ struct FieldDef : public Definition {
  bool native_inline;  // Field will be defined inline (instead of as a pointer)
                       // for native tables if field is a struct.
  bool flexbuffer;     // This field contains FlexBuffer data.
  bool offset64;       // If the field uses 64-bit offsets.

  enum Presence {
    // Field must always be present.

@ -528,9 +534,7 @@ inline bool IsUnionType(const Type &type) {
  return IsUnion(type) && IsInteger(type.base_type);
}

inline bool IsVector(const Type &type) {
  return type.base_type == BASE_TYPE_VECTOR;
}
inline bool IsVector(const Type &type) { return IsVector(type.base_type); }

inline bool IsVectorOfStruct(const Type &type) {
  return IsVector(type) && IsStruct(type.VectorType());

@ -952,6 +956,13 @@ class Parser : public ParserState {
  known_attributes_["native_default"] = true;
  known_attributes_["flexbuffer"] = true;
  known_attributes_["private"] = true;

  // An attribute added to a field to indicate that it uses 64-bit addressing.
  known_attributes_["offset64"] = true;

  // An attribute added to a vector field to indicate that it uses 64-bit
  // addressing and it has a 64-bit length.
  known_attributes_["vector64"] = true;
}
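To make the new attributes concrete, a hedged sketch of a schema that uses them and is fed through the parser (the table and field names are invented for illustration; a generator with 64-bit support is required):

  // Sketch only.
  const char *schema =
      "table BigData {\n"
      "  near_blob:[uint8];\n"
      "  far_blob:[uint8] (offset64);\n"   // 64-bit offset, 32-bit length
      "  big_blob:[uint8] (vector64);\n"   // 64-bit offset and 64-bit length
      "}\n"
      "root_type BigData;";
  flatbuffers::Parser parser;
  bool ok = parser.Parse(schema);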
// Copying is not allowed

@ -1062,7 +1073,7 @@ class Parser : public ParserState {
  FLATBUFFERS_CHECKED_ERROR ParseAnyValue(Value &val, FieldDef *field,
                                          size_t parent_fieldn,
                                          const StructDef *parent_struct_def,
                                          uoffset_t count,
                                          size_t count,
                                          bool inside_vector = false);
  template<typename F>
  FLATBUFFERS_CHECKED_ERROR ParseTableDelimiters(size_t &fieldn,

@ -1074,7 +1085,7 @@ class Parser : public ParserState {
  void SerializeStruct(FlatBufferBuilder &builder, const StructDef &struct_def,
                       const Value &val);
  template<typename F>
  FLATBUFFERS_CHECKED_ERROR ParseVectorDelimiters(uoffset_t &count, F body);
  FLATBUFFERS_CHECKED_ERROR ParseVectorDelimiters(size_t &count, F body);
  FLATBUFFERS_CHECKED_ERROR ParseVector(const Type &type, uoffset_t *ovalue,
                                        FieldDef *field, size_t fieldn);
  FLATBUFFERS_CHECKED_ERROR ParseArray(Value &array);

@ -1139,6 +1150,7 @@ class Parser : public ParserState {
  bool SupportsAdvancedArrayFeatures() const;
  bool SupportsOptionalScalars() const;
  bool SupportsDefaultVectorsAndStrings() const;
  bool Supports64BitOffsets() const;
  Namespace *UniqueNamespace(Namespace *ns);

  FLATBUFFERS_CHECKED_ERROR RecurseError();

@ -1288,8 +1300,7 @@ extern bool GenerateSwift(const Parser &parser, const std::string &path,
// Generate a schema file from the internal representation, useful after
// parsing a .proto schema.
extern std::string GenerateFBS(const Parser &parser,
                               const std::string &file_name,
                               bool no_log);
                               const std::string &file_name, bool no_log);
extern bool GenerateFBS(const Parser &parser, const std::string &path,
                        const std::string &file_name, bool no_log);
@ -66,6 +66,7 @@ inline size_t GetTypeSize(reflection::BaseType base_type) {
    4,  // Union
    0,  // Array. Only used in structs. 0 was chosen to prevent out-of-bounds
        // errors.
    8,  // Vector64

    0  // MaxBaseType. This must be kept the last entry in this array.
};
@ -64,10 +64,11 @@ enum BaseType {
  Obj = 15,
  Union = 16,
  Array = 17,
  MaxBaseType = 18
  Vector64 = 18,
  MaxBaseType = 19
};

inline const BaseType (&EnumValuesBaseType())[19] {
inline const BaseType (&EnumValuesBaseType())[20] {
  static const BaseType values[] = {
    None,
    UType,

@ -87,13 +88,14 @@ inline const BaseType (&EnumValuesBaseType())[19] {
    Obj,
    Union,
    Array,
    Vector64,
    MaxBaseType
  };
  return values;
}

inline const char * const *EnumNamesBaseType() {
  static const char * const names[20] = {
  static const char * const names[21] = {
    "None",
    "UType",
    "Bool",

@ -112,6 +114,7 @@ inline const char * const *EnumNamesBaseType() {
    "Obj",
    "Union",
    "Array",
    "Vector64",
    "MaxBaseType",
    nullptr
  };

@ -601,7 +604,8 @@ struct Field FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
    VT_ATTRIBUTES = 22,
    VT_DOCUMENTATION = 24,
    VT_OPTIONAL = 26,
    VT_PADDING = 28
    VT_PADDING = 28,
    VT_OFFSET64 = 30
  };
  const ::flatbuffers::String *name() const {
    return GetPointer<const ::flatbuffers::String *>(VT_NAME);

@ -649,6 +653,10 @@ struct Field FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
  uint16_t padding() const {
    return GetField<uint16_t>(VT_PADDING, 0);
  }
  /// If the field uses 64-bit offsets.
  bool offset64() const {
    return GetField<uint8_t>(VT_OFFSET64, 0) != 0;
  }
  bool Verify(::flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyOffsetRequired(verifier, VT_NAME) &&

@ -670,6 +678,7 @@ struct Field FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
           verifier.VerifyVectorOfStrings(documentation()) &&
           VerifyField<uint8_t>(verifier, VT_OPTIONAL, 1) &&
           VerifyField<uint16_t>(verifier, VT_PADDING, 2) &&
           VerifyField<uint8_t>(verifier, VT_OFFSET64, 1) &&
           verifier.EndTable();
  }
};

@ -717,6 +726,9 @@ struct FieldBuilder {
  void add_padding(uint16_t padding) {
    fbb_.AddElement<uint16_t>(Field::VT_PADDING, padding, 0);
  }
  void add_offset64(bool offset64) {
    fbb_.AddElement<uint8_t>(Field::VT_OFFSET64, static_cast<uint8_t>(offset64), 0);
  }
  explicit FieldBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
      : fbb_(_fbb) {
    start_ = fbb_.StartTable();

@ -744,7 +756,8 @@ inline ::flatbuffers::Offset<Field> CreateField(
    ::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset<reflection::KeyValue>>> attributes = 0,
    ::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset<::flatbuffers::String>>> documentation = 0,
    bool optional = false,
    uint16_t padding = 0) {
    uint16_t padding = 0,
    bool offset64 = false) {
  FieldBuilder builder_(_fbb);
  builder_.add_default_real(default_real);
  builder_.add_default_integer(default_integer);

@ -755,6 +768,7 @@ inline ::flatbuffers::Offset<Field> CreateField(
  builder_.add_padding(padding);
  builder_.add_offset(offset);
  builder_.add_id(id);
  builder_.add_offset64(offset64);
  builder_.add_optional(optional);
  builder_.add_key(key);
  builder_.add_required(required);

@ -776,7 +790,8 @@ inline ::flatbuffers::Offset<Field> CreateFieldDirect(
    std::vector<::flatbuffers::Offset<reflection::KeyValue>> *attributes = nullptr,
    const std::vector<::flatbuffers::Offset<::flatbuffers::String>> *documentation = nullptr,
    bool optional = false,
    uint16_t padding = 0) {
    uint16_t padding = 0,
    bool offset64 = false) {
  auto name__ = name ? _fbb.CreateString(name) : 0;
  auto attributes__ = attributes ? _fbb.CreateVectorOfSortedTables<reflection::KeyValue>(attributes) : 0;
  auto documentation__ = documentation ? _fbb.CreateVector<::flatbuffers::Offset<::flatbuffers::String>>(*documentation) : 0;

@ -794,7 +809,8 @@ inline ::flatbuffers::Offset<Field> CreateFieldDirect(
      attributes__,
      documentation__,
      optional,
      padding);
      padding,
      offset64);
}

struct Object FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
@ -47,14 +47,24 @@ class Table {
    return field_offset ? ReadScalar<T>(data_ + field_offset) : defaultval;
  }

  template<typename P> P GetPointer(voffset_t field) {
  template<typename P, typename OffsetSize = uoffset_t>
  P GetPointer(voffset_t field) {
    auto field_offset = GetOptionalFieldOffset(field);
    auto p = data_ + field_offset;
    return field_offset ? reinterpret_cast<P>(p + ReadScalar<uoffset_t>(p))
    return field_offset ? reinterpret_cast<P>(p + ReadScalar<OffsetSize>(p))
                        : nullptr;
  }
  template<typename P> P GetPointer(voffset_t field) const {
    return const_cast<Table *>(this)->GetPointer<P>(field);
  template<typename P, typename OffsetSize = uoffset_t>
  P GetPointer(voffset_t field) const {
    return const_cast<Table *>(this)->GetPointer<P, OffsetSize>(field);
  }

  template<typename P> P GetPointer64(voffset_t field) {
    return GetPointer<P, uoffset64_t>(field);
  }

  template<typename P> P GetPointer64(voffset_t field) const {
    return GetPointer<P, uoffset64_t>(field);
  }

  template<typename P> P GetStruct(voffset_t field) const {

@ -131,15 +141,25 @@ class Table {
  }

  // Versions for offsets.
  template<typename OffsetT = uoffset_t>
  bool VerifyOffset(const Verifier &verifier, voffset_t field) const {
    auto field_offset = GetOptionalFieldOffset(field);
    return !field_offset || verifier.VerifyOffset(data_, field_offset);
    return !field_offset || verifier.VerifyOffset<OffsetT>(data_, field_offset);
  }

  template<typename OffsetT = uoffset_t>
  bool VerifyOffsetRequired(const Verifier &verifier, voffset_t field) const {
    auto field_offset = GetOptionalFieldOffset(field);
    return verifier.Check(field_offset != 0) &&
           verifier.VerifyOffset(data_, field_offset);
           verifier.VerifyOffset<OffsetT>(data_, field_offset);
  }

  bool VerifyOffset64(const Verifier &verifier, voffset_t field) const {
    return VerifyOffset<uoffset64_t>(verifier, field);
  }

  bool VerifyOffset64Required(const Verifier &verifier, voffset_t field) const {
    return VerifyOffsetRequired<uoffset64_t>(verifier, field);
  }

 private:
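As a sketch of how generated code is expected to use these hooks, a hand-written accessor for a hypothetical string field stored behind a 64-bit offset (the slot number and names are illustrative, not generated by this PR):

  struct ExampleTable : private flatbuffers::Table {
    enum { VT_FAR_STRING = 4 };
    const flatbuffers::String *far_string() const {
      // Follows an 8-byte offset instead of the usual 4-byte one.
      return GetPointer64<const flatbuffers::String *>(VT_FAR_STRING);
    }
    bool Verify(flatbuffers::Verifier &verifier) const {
      return VerifyTableStart(verifier) &&
             VerifyOffset64(verifier, VT_FAR_STRING) &&
             verifier.VerifyString(far_string()) &&
             verifier.EndTable();
    }
  };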
@ -27,7 +27,8 @@ struct String;

// An STL compatible iterator implementation for Vector below, effectively
// calling Get() for every element.
template<typename T, typename IT, typename Data = uint8_t *>
template<typename T, typename IT, typename Data = uint8_t *,
         typename SizeT = uoffset_t>
struct VectorIterator {
  typedef std::random_access_iterator_tag iterator_category;
  typedef IT value_type;

@ -35,8 +36,9 @@ struct VectorIterator {
  typedef IT *pointer;
  typedef IT &reference;

  VectorIterator(Data data, uoffset_t i)
      : data_(data + IndirectHelper<T>::element_stride * i) {}
  static const SizeT element_stride = IndirectHelper<T>::element_stride;

  VectorIterator(Data data, SizeT i) : data_(data + element_stride * i) {}
  VectorIterator(const VectorIterator &other) : data_(other.data_) {}
  VectorIterator() : data_(nullptr) {}

@ -63,7 +65,7 @@ struct VectorIterator {
  }

  difference_type operator-(const VectorIterator &other) const {
    return (data_ - other.data_) / IndirectHelper<T>::element_stride;
    return (data_ - other.data_) / element_stride;
  }

  // Note: return type is incompatible with the standard

@ -75,44 +77,42 @@ struct VectorIterator {
  IT operator->() const { return IndirectHelper<T>::Read(data_, 0); }

  VectorIterator &operator++() {
    data_ += IndirectHelper<T>::element_stride;
    data_ += element_stride;
    return *this;
  }

  VectorIterator operator++(int) {
    VectorIterator temp(data_, 0);
    data_ += IndirectHelper<T>::element_stride;
    data_ += element_stride;
    return temp;
  }

  VectorIterator operator+(const uoffset_t &offset) const {
    return VectorIterator(data_ + offset * IndirectHelper<T>::element_stride,
                          0);
  VectorIterator operator+(const SizeT &offset) const {
    return VectorIterator(data_ + offset * element_stride, 0);
  }

  VectorIterator &operator+=(const uoffset_t &offset) {
    data_ += offset * IndirectHelper<T>::element_stride;
  VectorIterator &operator+=(const SizeT &offset) {
    data_ += offset * element_stride;
    return *this;
  }

  VectorIterator &operator--() {
    data_ -= IndirectHelper<T>::element_stride;
    data_ -= element_stride;
    return *this;
  }

  VectorIterator operator--(int) {
    VectorIterator temp(data_, 0);
    data_ -= IndirectHelper<T>::element_stride;
    data_ -= element_stride;
    return temp;
  }

  VectorIterator operator-(const uoffset_t &offset) const {
    return VectorIterator(data_ - offset * IndirectHelper<T>::element_stride,
                          0);
  VectorIterator operator-(const SizeT &offset) const {
    return VectorIterator(data_ - offset * element_stride, 0);
  }

  VectorIterator &operator-=(const uoffset_t &offset) {
    data_ -= offset * IndirectHelper<T>::element_stride;
  VectorIterator &operator-=(const SizeT &offset) {
    data_ -= offset * element_stride;
    return *this;
  }

@ -120,8 +120,8 @@ struct VectorIterator {
  Data data_;
};

template<typename T, typename IT>
using VectorConstIterator = VectorIterator<T, IT, const uint8_t *>;
template<typename T, typename IT, typename SizeT = uoffset_t>
using VectorConstIterator = VectorIterator<T, IT, const uint8_t *, SizeT>;

template<typename Iterator>
struct VectorReverseIterator : public std::reverse_iterator<Iterator> {

@ -145,11 +145,14 @@ struct VectorReverseIterator : public std::reverse_iterator<Iterator> {

// This is used as a helper type for accessing vectors.
// Vector::data() assumes the vector elements start after the length field.
template<typename T> class Vector {
template<typename T, typename SizeT = uoffset_t> class Vector {
 public:
  typedef VectorIterator<T, typename IndirectHelper<T>::mutable_return_type>
  typedef VectorIterator<T,
                         typename IndirectHelper<T>::mutable_return_type,
                         uint8_t *, SizeT>
      iterator;
  typedef VectorConstIterator<T, typename IndirectHelper<T>::return_type>
  typedef VectorConstIterator<T, typename IndirectHelper<T>::return_type,
                              SizeT>
      const_iterator;
  typedef VectorReverseIterator<iterator> reverse_iterator;
  typedef VectorReverseIterator<const_iterator> const_reverse_iterator;

@ -160,39 +163,41 @@ template<typename T> class Vector {
  static FLATBUFFERS_CONSTEXPR bool is_span_observable =
      scalar_tag::value && (FLATBUFFERS_LITTLEENDIAN || sizeof(T) == 1);

  uoffset_t size() const { return EndianScalar(length_); }
  SizeT size() const { return EndianScalar(length_); }

  // Deprecated: use size(). Here for backwards compatibility.
  FLATBUFFERS_ATTRIBUTE([[deprecated("use size() instead")]])
  uoffset_t Length() const { return size(); }
  SizeT Length() const { return size(); }

  typedef SizeT size_type;
  typedef typename IndirectHelper<T>::return_type return_type;
  typedef typename IndirectHelper<T>::mutable_return_type mutable_return_type;
  typedef typename IndirectHelper<T>::mutable_return_type
      mutable_return_type;
  typedef return_type value_type;

  return_type Get(uoffset_t i) const {
  return_type Get(SizeT i) const {
    FLATBUFFERS_ASSERT(i < size());
    return IndirectHelper<T>::Read(Data(), i);
  }

  return_type operator[](uoffset_t i) const { return Get(i); }
  return_type operator[](SizeT i) const { return Get(i); }

  // If this is a Vector of enums, T will be its storage type, not the enum
  // type. This function makes it convenient to retrieve value with enum
  // type E.
  template<typename E> E GetEnum(uoffset_t i) const {
  template<typename E> E GetEnum(SizeT i) const {
    return static_cast<E>(Get(i));
  }

  // If this a vector of unions, this does the cast for you. There's no check
  // to make sure this is the right type!
  template<typename U> const U *GetAs(uoffset_t i) const {
  template<typename U> const U *GetAs(SizeT i) const {
    return reinterpret_cast<const U *>(Get(i));
  }

  // If this a vector of unions, this does the cast for you. There's no check
  // to make sure this is actually a string!
  const String *GetAsString(uoffset_t i) const {
  const String *GetAsString(SizeT i) const {
    return reinterpret_cast<const String *>(Get(i));
  }

@ -226,7 +231,7 @@ template<typename T> class Vector {

  // Change elements if you have a non-const pointer to this object.
  // Scalars only. See reflection.h, and the documentation.
  void Mutate(uoffset_t i, const T &val) {
  void Mutate(SizeT i, const T &val) {
    FLATBUFFERS_ASSERT(i < size());
    WriteScalar(data() + i, val);
  }

@ -234,15 +239,15 @@ template<typename T> class Vector {
  // Change an element of a vector of tables (or strings).
  // "val" points to the new table/string, as you can obtain from
  // e.g. reflection::AddFlatBuffer().
  void MutateOffset(uoffset_t i, const uint8_t *val) {
  void MutateOffset(SizeT i, const uint8_t *val) {
    FLATBUFFERS_ASSERT(i < size());
    static_assert(sizeof(T) == sizeof(uoffset_t), "Unrelated types");
    static_assert(sizeof(T) == sizeof(SizeT), "Unrelated types");
    WriteScalar(data() + i,
                static_cast<uoffset_t>(val - (Data() + i * sizeof(uoffset_t))));
                static_cast<SizeT>(val - (Data() + i * sizeof(SizeT))));
  }

  // Get a mutable pointer to tables/strings inside this vector.
  mutable_return_type GetMutableObject(uoffset_t i) const {
  mutable_return_type GetMutableObject(SizeT i) const {
    FLATBUFFERS_ASSERT(i < size());
    return const_cast<mutable_return_type>(IndirectHelper<T>::Read(Data(), i));
  }

@ -280,7 +285,7 @@ template<typename T> class Vector {
  // try to construct these manually.
  Vector();

  uoffset_t length_;
  SizeT length_;

 private:
  // This class is a pointer. Copying will therefore create an invalid object.

@ -299,6 +304,8 @@ template<typename T> class Vector {
  }
};

template<typename T> using Vector64 = Vector<T, uoffset64_t>;
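Since Vector64 is just Vector<T, uoffset64_t>, read-side code is unchanged apart from the wider index type; a minimal sketch:

  uint64_t SumBytes(const flatbuffers::Vector64<uint8_t> *vec) {
    uint64_t total = 0;
    for (flatbuffers::uoffset64_t i = 0; i < vec->size(); ++i) total += vec->Get(i);
    return total;
  }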
template<class U>
FLATBUFFERS_CONSTEXPR_CPP11 flatbuffers::span<U> make_span(Vector<U> &vec)
    FLATBUFFERS_NOEXCEPT {

@ -17,6 +17,8 @@
#ifndef FLATBUFFERS_VECTOR_DOWNWARD_H_
#define FLATBUFFERS_VECTOR_DOWNWARD_H_

#include <cstdint>

#include <algorithm>

#include "flatbuffers/base.h"

@ -31,13 +33,15 @@ namespace flatbuffers {
// Since this vector leaves the lower part unused, we support a "scratch-pad"
// that can be stored there for temporary data, to share the allocated space.
// Essentially, this supports 2 std::vectors in a single buffer.
class vector_downward {
template<typename SizeT = uoffset_t> class vector_downward {
 public:
  explicit vector_downward(size_t initial_size, Allocator *allocator,
                           bool own_allocator, size_t buffer_minalign)
                           bool own_allocator, size_t buffer_minalign,
                           const SizeT max_size = FLATBUFFERS_MAX_BUFFER_SIZE)
      : allocator_(allocator),
        own_allocator_(own_allocator),
        initial_size_(initial_size),
        max_size_(max_size),
        buffer_minalign_(buffer_minalign),
        reserved_(0),
        size_(0),

@ -50,6 +54,7 @@ class vector_downward {
      : allocator_(other.allocator_),
        own_allocator_(other.own_allocator_),
        initial_size_(other.initial_size_),
        max_size_(other.max_size_),
        buffer_minalign_(other.buffer_minalign_),
        reserved_(other.reserved_),
        size_(other.size_),

@ -111,7 +116,7 @@ class vector_downward {
  uint8_t *release_raw(size_t &allocated_bytes, size_t &offset) {
    auto *buf = buf_;
    allocated_bytes = reserved_;
    offset = static_cast<size_t>(cur_ - buf_);
    offset = vector_downward::offset();

    // release_raw only relinquishes the buffer ownership.
    // Does not deallocate or reset the allocator. Destructor will do that.

@ -136,10 +141,10 @@ class vector_downward {

  size_t ensure_space(size_t len) {
    FLATBUFFERS_ASSERT(cur_ >= scratch_ && scratch_ >= buf_);
    if (len > static_cast<size_t>(cur_ - scratch_)) { reallocate(len); }
    // Beyond this, signed offsets may not have enough range:
    // (FlatBuffers > 2GB not supported).
    FLATBUFFERS_ASSERT(size() < FLATBUFFERS_MAX_BUFFER_SIZE);
    // If the length is larger than the unused part of the buffer, we need to
    // grow.
    if (len > unused_buffer_size()) { reallocate(len); }
    FLATBUFFERS_ASSERT(size() < max_size_);
    return len;
  }

@ -147,7 +152,7 @@ class vector_downward {
    if (len) {
      ensure_space(len);
      cur_ -= len;
      size_ += static_cast<uoffset_t>(len);
      size_ += static_cast<SizeT>(len);
    }
    return cur_;
  }

@ -155,11 +160,17 @@ class vector_downward {
  // Returns nullptr if using the DefaultAllocator.
  Allocator *get_custom_allocator() { return allocator_; }

  inline uoffset_t size() const { return size_; }
  // The current offset into the buffer.
  size_t offset() const { return cur_ - buf_; }

  uoffset_t scratch_size() const {
    return static_cast<uoffset_t>(scratch_ - buf_);
  }
  // The total size of the vector (both the buffer and scratch parts).
  inline SizeT size() const { return size_; }

  // The size of the buffer part of the vector that is currently unused.
  SizeT unused_buffer_size() const { return static_cast<SizeT>(cur_ - scratch_); }

  // The size of the scratch part of the vector.
  SizeT scratch_size() const { return static_cast<SizeT>(scratch_ - buf_); }

  size_t capacity() const { return reserved_; }

@ -211,7 +222,7 @@ class vector_downward {

  void pop(size_t bytes_to_remove) {
    cur_ += bytes_to_remove;
    size_ -= static_cast<uoffset_t>(bytes_to_remove);
    size_ -= static_cast<SizeT>(bytes_to_remove);
  }

  void scratch_pop(size_t bytes_to_remove) { scratch_ -= bytes_to_remove; }

@ -224,6 +235,7 @@ class vector_downward {
    swap(buffer_minalign_, other.buffer_minalign_);
    swap(reserved_, other.reserved_);
    swap(size_, other.size_);
    swap(max_size_, other.max_size_);
    swap(buf_, other.buf_);
    swap(cur_, other.cur_);
    swap(scratch_, other.scratch_);

@ -243,9 +255,12 @@ class vector_downward {
  Allocator *allocator_;
  bool own_allocator_;
  size_t initial_size_;

  // The maximum size the vector can be.
  SizeT max_size_;
  size_t buffer_minalign_;
  size_t reserved_;
  uoffset_t size_;
  SizeT size_;
  uint8_t *buf_;
  uint8_t *cur_;  // Points at location between empty (below) and used (above).
  uint8_t *scratch_;  // Points to the end of the scratchpad in use.
@ -34,12 +34,16 @@ class Verifier FLATBUFFERS_FINAL_CLASS {
    bool check_alignment = true;
    // If true, run verifier on nested flatbuffers
    bool check_nested_flatbuffers = true;
    // The maximum size of a buffer.
    size_t max_size = FLATBUFFERS_MAX_BUFFER_SIZE;
    // Use assertions to check for errors.
    bool assert = false;
  };

  explicit Verifier(const uint8_t *const buf, const size_t buf_len,
                    const Options &opts)
      : buf_(buf), size_(buf_len), opts_(opts) {
    FLATBUFFERS_ASSERT(size_ < FLATBUFFERS_MAX_BUFFER_SIZE);
    FLATBUFFERS_ASSERT(size_ < opts.max_size);
  }
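A sketch of how a caller might raise the verifier's cap for a buffer larger than 2 GiB (buf/buf_len and the 16 GiB figure are illustrative):

  flatbuffers::Verifier::Options opts;
  opts.max_size = static_cast<size_t>(1) << 34;  // allow up to 16 GiB
  flatbuffers::Verifier verifier(buf, buf_len, opts);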
  // Deprecated API, please construct with Verifier::Options.

@ -58,7 +62,7 @@ class Verifier FLATBUFFERS_FINAL_CLASS {
  bool Check(const bool ok) const {
    // clang-format off
    #ifdef FLATBUFFERS_DEBUG_VERIFICATION_FAILURE
      FLATBUFFERS_ASSERT(ok);
      if (opts_.assert) { FLATBUFFERS_ASSERT(ok); }
    #endif
    #ifdef FLATBUFFERS_TRACK_VERIFIER_BUFFER_SIZE
      if (!ok)

@ -113,41 +117,43 @@ class Verifier FLATBUFFERS_FINAL_CLASS {
  }

  // Verify a pointer (may be NULL) of any vector type.
  template<typename T> bool VerifyVector(const Vector<T> *const vec) const {
    return !vec || VerifyVectorOrString(reinterpret_cast<const uint8_t *>(vec),
                                        sizeof(T));
  template<int &..., typename T, typename LenT>
  bool VerifyVector(const Vector<T, LenT> *const vec) const {
    return !vec || VerifyVectorOrString<LenT>(
                       reinterpret_cast<const uint8_t *>(vec), sizeof(T));
  }

  // Verify a pointer (may be NULL) of a vector to struct.
  template<typename T>
  bool VerifyVector(const Vector<const T *> *const vec) const {
    return VerifyVector(reinterpret_cast<const Vector<T> *>(vec));
  template<int &..., typename T, typename LenT>
  bool VerifyVector(const Vector<const T *, LenT> *const vec) const {
    return VerifyVector(reinterpret_cast<const Vector<T, LenT> *>(vec));
  }

  // Verify a pointer (may be NULL) to string.
  bool VerifyString(const String *const str) const {
    size_t end;
    return !str || (VerifyVectorOrString(reinterpret_cast<const uint8_t *>(str),
                                         1, &end) &&
    return !str || (VerifyVectorOrString<uoffset_t>(
                        reinterpret_cast<const uint8_t *>(str), 1, &end) &&
                    Verify(end, 1) &&           // Must have terminator
                    Check(buf_[end] == '\0'));  // Terminating byte must be 0.
  }

  // Common code between vectors and strings.
  template<typename LenT = uoffset_t>
  bool VerifyVectorOrString(const uint8_t *const vec, const size_t elem_size,
                            size_t *const end = nullptr) const {
    const auto veco = static_cast<size_t>(vec - buf_);
    const auto vec_offset = static_cast<size_t>(vec - buf_);
    // Check we can read the size field.
    if (!Verify<uoffset_t>(veco)) return false;
    if (!Verify<LenT>(vec_offset)) return false;
    // Check the whole array. If this is a string, the byte past the array must
    // be 0.
    const auto size = ReadScalar<uoffset_t>(vec);
    const auto max_elems = FLATBUFFERS_MAX_BUFFER_SIZE / elem_size;
    const LenT size = ReadScalar<LenT>(vec);
    const auto max_elems = opts_.max_size / elem_size;
    if (!Check(size < max_elems))
      return false;  // Protect against byte_size overflowing.
    const auto byte_size = sizeof(size) + elem_size * size;
    if (end) *end = veco + byte_size;
    return Verify(veco, byte_size);
    const auto byte_size = sizeof(LenT) + elem_size * size;
    if (end) *end = vec_offset + byte_size;
    return Verify(vec_offset, byte_size);
  }

  // Special case for string contents, after the above has been called.

@ -203,7 +209,7 @@ class Verifier FLATBUFFERS_FINAL_CLASS {
  }

  // Call T::Verify, which must be in the generated code for this type.
  const auto o = VerifyOffset(start);
  const auto o = VerifyOffset<uoffset_t>(start);
  return Check(o != 0) &&
         reinterpret_cast<const T *>(buf_ + start + o)->Verify(*this)
  // clang-format off

@ -214,8 +220,8 @@ class Verifier FLATBUFFERS_FINAL_CLASS {
  // clang-format on
  }

  template<typename T>
  bool VerifyNestedFlatBuffer(const Vector<uint8_t> *const buf,
  template<typename T, int &..., typename SizeT>
  bool VerifyNestedFlatBuffer(const Vector<uint8_t, SizeT> *const buf,
                              const char *const identifier) {
    // Caller opted out of this.
    if (!opts_.check_nested_flatbuffers) return true;

@ -226,7 +232,7 @@ class Verifier FLATBUFFERS_FINAL_CLASS {
    // If there is a nested buffer, it must be greater than the min size.
    if (!Check(buf->size() >= FLATBUFFERS_MIN_BUFFER_SIZE)) return false;

    Verifier nested_verifier(buf->data(), buf->size());
    Verifier nested_verifier(buf->data(), buf->size(), opts_);
    return nested_verifier.VerifyBuffer<T>(identifier);
  }

@ -237,29 +243,30 @@ class Verifier FLATBUFFERS_FINAL_CLASS {
    return VerifyBufferFromStart<T>(identifier, 0);
  }

  template<typename T>
  template<typename T, typename SizeT = uoffset_t>
  bool VerifySizePrefixedBuffer(const char *const identifier) {
    return Verify<uoffset_t>(0U) &&
           Check(ReadScalar<uoffset_t>(buf_) == size_ - sizeof(uoffset_t)) &&
           VerifyBufferFromStart<T>(identifier, sizeof(uoffset_t));
    return Verify<SizeT>(0U) &&
           Check(ReadScalar<SizeT>(buf_) == size_ - sizeof(SizeT)) &&
           VerifyBufferFromStart<T>(identifier, sizeof(SizeT));
  }
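Continuing the sketch above, the widened entry point lets the same verifier check a buffer whose size prefix is 64 bits wide (MyTable stands in for any generated root type):

  bool ok = verifier.VerifySizePrefixedBuffer<MyTable, flatbuffers::uoffset64_t>(nullptr);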
  uoffset_t VerifyOffset(const size_t start) const {
    if (!Verify<uoffset_t>(start)) return 0;
    const auto o = ReadScalar<uoffset_t>(buf_ + start);
  template<typename OffsetT = uoffset_t, typename SOffsetT = soffset_t>
  size_t VerifyOffset(const size_t start) const {
    if (!Verify<OffsetT>(start)) return 0;
    const auto o = ReadScalar<OffsetT>(buf_ + start);
    // May not point to itself.
    if (!Check(o != 0)) return 0;
    // Can't wrap around / buffers are max 2GB.
    if (!Check(static_cast<soffset_t>(o) >= 0)) return 0;
    // Can't wrap around larger than the max size.
    if (!Check(static_cast<SOffsetT>(o) >= 0)) return 0;
    // Must be inside the buffer to create a pointer from it (pointer outside
    // buffer is UB).
    if (!Verify(start + o, 1)) return 0;
    return o;
  }

  uoffset_t VerifyOffset(const uint8_t *const base,
                         const voffset_t start) const {
    return VerifyOffset(static_cast<size_t>(base - buf_) + start);
  template<typename OffsetT = uoffset_t>
  size_t VerifyOffset(const uint8_t *const base, const voffset_t start) const {
    return VerifyOffset<OffsetT>(static_cast<size_t>(base - buf_) + start);
  }

  // Called at the start of a table to increase counters measuring data

@ -312,6 +319,12 @@ class Verifier FLATBUFFERS_FINAL_CLASS {
  std::vector<uint8_t> *flex_reuse_tracker_ = nullptr;
};

// Specialization for 64-bit offsets.
template<>
inline size_t Verifier::VerifyOffset<uoffset64_t>(const size_t start) const {
  return VerifyOffset<uoffset64_t, soffset64_t>(start);
}

}  // namespace flatbuffers

#endif  // FLATBUFFERS_VERIFIER_H_
@ -23,9 +23,10 @@ public final class BaseType {
  public static final byte Obj = 15;
  public static final byte Union = 16;
  public static final byte Array = 17;
  public static final byte MaxBaseType = 18;
  public static final byte Vector64 = 18;
  public static final byte MaxBaseType = 19;

  public static final String[] names = { "None", "UType", "Bool", "Byte", "UByte", "Short", "UShort", "Int", "UInt", "Long", "ULong", "Float", "Double", "String", "Vector", "Obj", "Union", "Array", "MaxBaseType", };
  public static final String[] names = { "None", "UType", "Bool", "Byte", "UByte", "Short", "UShort", "Int", "UInt", "Long", "ULong", "Float", "Double", "String", "Vector", "Obj", "Union", "Array", "Vector64", "MaxBaseType", };

  public static String name(int e) { return names[e]; }
}

@ -55,6 +55,10 @@ public final class Field extends Table {
   * Number of padding octets to always add after this field. Structs only.
   */
  public int padding() { int o = __offset(28); return o != 0 ? bb.getShort(o + bb_pos) & 0xFFFF : 0; }
  /**
   * If the field uses 64-bit offsets.
   */
  public boolean offset64() { int o = __offset(30); return o != 0 ? 0!=bb.get(o + bb_pos) : false; }

  public static int createField(FlatBufferBuilder builder,
      int nameOffset,

@ -69,8 +73,9 @@ public final class Field extends Table {
      int attributesOffset,
      int documentationOffset,
      boolean optional,
      int padding) {
    builder.startTable(13);
      int padding,
      boolean offset64) {
    builder.startTable(14);
    Field.addDefaultReal(builder, defaultReal);
    Field.addDefaultInteger(builder, defaultInteger);
    Field.addDocumentation(builder, documentationOffset);

@ -80,6 +85,7 @@ public final class Field extends Table {
    Field.addPadding(builder, padding);
    Field.addOffset(builder, offset);
    Field.addId(builder, id);
    Field.addOffset64(builder, offset64);
    Field.addOptional(builder, optional);
    Field.addKey(builder, key);
    Field.addRequired(builder, required);

@ -87,7 +93,7 @@ public final class Field extends Table {
    return Field.endField(builder);
  }

  public static void startField(FlatBufferBuilder builder) { builder.startTable(13); }
  public static void startField(FlatBufferBuilder builder) { builder.startTable(14); }
  public static void addName(FlatBufferBuilder builder, int nameOffset) { builder.addOffset(nameOffset); builder.slot(0); }
  public static void addType(FlatBufferBuilder builder, int typeOffset) { builder.addOffset(1, typeOffset, 0); }
  public static void addId(FlatBufferBuilder builder, int id) { builder.addShort(2, (short) id, (short) 0); }

@ -105,6 +111,7 @@ public final class Field extends Table {
  public static void startDocumentationVector(FlatBufferBuilder builder, int numElems) { builder.startVector(4, numElems, 4); }
  public static void addOptional(FlatBufferBuilder builder, boolean optional) { builder.addBoolean(11, optional, false); }
  public static void addPadding(FlatBufferBuilder builder, int padding) { builder.addShort(12, (short) padding, (short) 0); }
  public static void addOffset64(FlatBufferBuilder builder, boolean offset64) { builder.addBoolean(13, offset64, false); }
  public static int endField(FlatBufferBuilder builder) {
    int o = builder.endTable();
    builder.required(o, 4); // name
@ -21,4 +21,5 @@ class BaseType(object):
    Obj = 15
    Union = 16
    Array = 17
    MaxBaseType = 18
    Vector64 = 18
    MaxBaseType = 19

@ -155,8 +155,16 @@ class Field(object):
            return self._tab.Get(flatbuffers.number_types.Uint16Flags, o + self._tab.Pos)
        return 0

    # If the field uses 64-bit offsets.
    # Field
    def Offset64(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(30))
        if o != 0:
            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
        return False

def FieldStart(builder):
    builder.StartObject(13)
    builder.StartObject(14)

def Start(builder):
    FieldStart(builder)

@ -251,6 +259,12 @@ def FieldAddPadding(builder, padding):
def AddPadding(builder: flatbuffers.Builder, padding: int):
    FieldAddPadding(builder, padding)

def FieldAddOffset64(builder, offset64):
    builder.PrependBoolSlot(13, offset64, 0)

def AddOffset64(builder: flatbuffers.Builder, offset64: bool):
    FieldAddOffset64(builder, offset64)

def FieldEnd(builder):
    return builder.EndObject()
@ -25,6 +25,7 @@ enum BaseType : byte {
  Obj,     // Used for tables & structs.
  Union,
  Array,
  Vector64,

  // Add any new type above this value.
  MaxBaseType

@ -85,6 +86,8 @@ table Field {
  optional:bool = false;
  /// Number of padding octets to always add after this field. Structs only.
  padding:uint16 = 0;
  /// If the field uses 64-bit offsets.
  offset64:bool = false;
}

table Object {  // Used for both tables and structs.
@ -1,6 +1,7 @@
#include "annotated_binary_text_gen.h"

#include <algorithm>
#include <cstdint>
#include <fstream>
#include <ostream>
#include <sstream>

@ -36,6 +37,7 @@ static std::string ToString(const BinarySectionType type) {
    case BinarySectionType::Struct: return "struct";
    case BinarySectionType::String: return "string";
    case BinarySectionType::Vector: return "vector";
    case BinarySectionType::Vector64: return "vector64";
    case BinarySectionType::Unknown: return "unknown";
    case BinarySectionType::Union: return "union";
    case BinarySectionType::Padding: return "padding";

@ -44,7 +46,9 @@ static std::string ToString(const BinarySectionType type) {
}

static bool IsOffset(const BinaryRegionType type) {
  return type == BinaryRegionType::UOffset || type == BinaryRegionType::SOffset;
  return type == BinaryRegionType::UOffset ||
         type == BinaryRegionType::SOffset ||
         type == BinaryRegionType::UOffset64;
}

template<typename T> std::string ToString(T value) {

@ -119,6 +123,9 @@ static std::string ToValueString(const BinaryRegion &region,
    case BinaryRegionType::UType: return ToValueString<uint8_t>(region, binary);

    // Handle Offsets separately, in case they add additional details.
    case BinaryRegionType::UOffset64:
      s += ToValueString<uint64_t>(region, binary);
      break;
    case BinaryRegionType::UOffset:
      s += ToValueString<uint32_t>(region, binary);
      break;

@ -368,7 +375,8 @@ static void GenerateSection(std::ostream &os, const BinarySection &section,
  // As a space saving measure, skip generating every vector element, just put
  // the first and last elements in the output. Skip the whole thing if there
  // are only three or fewer elements, as it doesn't save space.
  if (section.type == BinarySectionType::Vector &&
  if ((section.type == BinarySectionType::Vector ||
       section.type == BinarySectionType::Vector64) &&
      !output_config.include_vector_contents && section.regions.size() > 4) {
    // Generate the length region which should be first.
    GenerateRegion(os, section.regions[0], section, binary, output_config);
@ -1,10 +1,13 @@
#include "binary_annotator.h"

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <limits>
#include <string>
#include <vector>

#include "flatbuffers/base.h"
#include "flatbuffers/reflection.h"
#include "flatbuffers/util.h"
#include "flatbuffers/verifier.h"

@ -37,9 +40,9 @@ static BinaryRegion MakeBinaryRegion(
  return region;
}

static BinarySection MakeBinarySection(
    const std::string &name, const BinarySectionType type,
    std::vector<BinaryRegion> regions) {
static BinarySection MakeBinarySection(const std::string &name,
                                       const BinarySectionType type,
                                       std::vector<BinaryRegion> regions) {
  BinarySection section;
  section.name = name;
  section.type = type;

@ -118,12 +121,15 @@ static BinarySection GenerateMissingSection(const uint64_t offset,

std::map<uint64_t, BinarySection> BinaryAnnotator::Annotate() {
  flatbuffers::Verifier verifier(bfbs_, static_cast<size_t>(bfbs_length_));
  if (!reflection::VerifySchemaBuffer(verifier)) { return {}; }

  if ((is_size_prefixed_ &&
       !reflection::VerifySizePrefixedSchemaBuffer(verifier)) ||
      !reflection::VerifySchemaBuffer(verifier)) {
    return {};
  }

  // The binary is too short to read as a flatbuffers.
  // TODO(dbaileychess): We could spit out the annotated buffer sections, but
  // I'm not sure if it is worth it.
  if (binary_length_ < 4) { return {}; }
  if (binary_length_ < FLATBUFFERS_MIN_BUFFER_SIZE) { return {}; }

  // Make sure we start with a clean slate.
  vtables_.clear();

@ -151,7 +157,41 @@ std::map<uint64_t, BinarySection> BinaryAnnotator::Annotate() {
}

uint64_t BinaryAnnotator::BuildHeader(const uint64_t header_offset) {
  const auto root_table_offset = ReadScalar<uint32_t>(header_offset);
  uint64_t offset = header_offset;
  std::vector<BinaryRegion> regions;

  // If this binary is a size prefixed one, attempt to parse the size.
  if (is_size_prefixed_) {
    BinaryRegionComment prefix_length_comment;
    prefix_length_comment.type = BinaryRegionCommentType::SizePrefix;

    bool has_prefix_value = false;
    const auto prefix_length = ReadScalar<uoffset64_t>(offset);
    if (*prefix_length <= binary_length_) {
      regions.push_back(MakeBinaryRegion(offset, sizeof(uoffset64_t),
                                         BinaryRegionType::Uint64, 0, 0,
                                         prefix_length_comment));
      offset += sizeof(uoffset64_t);
      has_prefix_value = true;
    }

    if (!has_prefix_value) {
      const auto prefix_length = ReadScalar<uoffset_t>(offset);
      if (*prefix_length <= binary_length_) {
        regions.push_back(MakeBinaryRegion(offset, sizeof(uoffset_t),
                                           BinaryRegionType::Uint32, 0, 0,
                                           prefix_length_comment));
        offset += sizeof(uoffset_t);
        has_prefix_value = true;
      }
    }

    if (!has_prefix_value) {
      SetError(prefix_length_comment, BinaryRegionStatus::ERROR);
    }
  }

  const auto root_table_offset = ReadScalar<uint32_t>(offset);

  if (!root_table_offset.has_value()) {
    // This shouldn't occur, since we validate the min size of the buffer

@ -159,22 +199,20 @@ uint64_t BinaryAnnotator::BuildHeader(const uint64_t header_offset) {
    return std::numeric_limits<uint64_t>::max();
  }

  std::vector<BinaryRegion> regions;
  uint64_t offset = header_offset;
  // TODO(dbaileychess): sized prefixed value
  const auto root_table_loc = offset + *root_table_offset;

  BinaryRegionComment root_offset_comment;
  root_offset_comment.type = BinaryRegionCommentType::RootTableOffset;
  root_offset_comment.name = schema_->root_table()->name()->str();

  if (!IsValidOffset(root_table_offset.value())) {
  if (!IsValidOffset(root_table_loc)) {
    SetError(root_offset_comment,
             BinaryRegionStatus::ERROR_OFFSET_OUT_OF_BINARY);
  }

  regions.push_back(
      MakeBinaryRegion(offset, sizeof(uint32_t), BinaryRegionType::UOffset, 0,
                       root_table_offset.value(), root_offset_comment));
  regions.push_back(MakeBinaryRegion(offset, sizeof(uint32_t),
                                     BinaryRegionType::UOffset, 0,
                                     root_table_loc, root_offset_comment));
  offset += sizeof(uint32_t);

  if (IsValidRead(offset, flatbuffers::kFileIdentifierLength) &&

@ -193,7 +231,7 @@ uint64_t BinaryAnnotator::BuildHeader(const uint64_t header_offset) {
  AddSection(header_offset, MakeBinarySection("", BinarySectionType::Header,
                                              std::move(regions)));

  return root_table_offset.value();
  return root_table_loc;
}

BinaryAnnotator::VTable *BinaryAnnotator::GetOrBuildVTable(
@ -656,7 +694,18 @@ void BinaryAnnotator::BuildTable(const uint64_t table_offset,
  }

  // Read the offset
  const auto offset_from_field = ReadScalar<uint32_t>(field_offset);
  uint64_t offset = 0;
  uint64_t length = sizeof(uint32_t);
  BinaryRegionType region_type = BinaryRegionType::UOffset;

  if (field->offset64()) {
    length = sizeof(uint64_t);
    region_type = BinaryRegionType::UOffset64;
    offset = ReadScalar<uint64_t>(field_offset).value_or(0);
  } else {
    offset = ReadScalar<uint32_t>(field_offset).value_or(0);
  }
  // const auto offset_from_field = ReadScalar<uint32_t>(field_offset);
  uint64_t offset_of_next_item = 0;
  BinaryRegionComment offset_field_comment;
  offset_field_comment.type = BinaryRegionCommentType::TableOffsetField;

@ -666,7 +715,7 @@ void BinaryAnnotator::BuildTable(const uint64_t table_offset,

  // Validate any field that isn't inline (i.e., non-structs).
  if (!IsInlineField(field)) {
    if (!offset_from_field.has_value()) {
    if (offset == 0) {
      const uint64_t remaining = RemainingBytes(field_offset);

      SetError(offset_field_comment,

@ -678,14 +727,14 @@ void BinaryAnnotator::BuildTable(const uint64_t table_offset,
      continue;
    }

    offset_of_next_item = field_offset + offset_from_field.value();
    offset_of_next_item = field_offset + offset;

    if (!IsValidOffset(offset_of_next_item)) {
      SetError(offset_field_comment,
               BinaryRegionStatus::ERROR_OFFSET_OUT_OF_BINARY);
      regions.push_back(MakeBinaryRegion(
          field_offset, sizeof(uint32_t), BinaryRegionType::UOffset, 0,
          offset_of_next_item, offset_field_comment));
      regions.push_back(MakeBinaryRegion(field_offset, length, region_type, 0,
                                         offset_of_next_item,
                                         offset_field_comment));
      continue;
    }
  }

@ -702,9 +751,9 @@ void BinaryAnnotator::BuildTable(const uint64_t table_offset,
      } else {
        offset_field_comment.default_value = "(table)";

        regions.push_back(MakeBinaryRegion(
            field_offset, sizeof(uint32_t), BinaryRegionType::UOffset, 0,
            offset_of_next_item, offset_field_comment));
        regions.push_back(MakeBinaryRegion(field_offset, length, region_type,
                                           0, offset_of_next_item,
                                           offset_field_comment));

        BuildTable(offset_of_next_item, BinarySectionType::Table,
                   next_object);

@ -713,17 +762,25 @@ void BinaryAnnotator::BuildTable(const uint64_t table_offset,

    case reflection::BaseType::String: {
      offset_field_comment.default_value = "(string)";
      regions.push_back(MakeBinaryRegion(
          field_offset, sizeof(uint32_t), BinaryRegionType::UOffset, 0,
          offset_of_next_item, offset_field_comment));
      regions.push_back(MakeBinaryRegion(field_offset, length, region_type, 0,
                                         offset_of_next_item,
                                         offset_field_comment));
      BuildString(offset_of_next_item, table, field);
    } break;

    case reflection::BaseType::Vector: {
      offset_field_comment.default_value = "(vector)";
      regions.push_back(MakeBinaryRegion(
          field_offset, sizeof(uint32_t), BinaryRegionType::UOffset, 0,
          offset_of_next_item, offset_field_comment));
      regions.push_back(MakeBinaryRegion(field_offset, length, region_type, 0,
                                         offset_of_next_item,
                                         offset_field_comment));
      BuildVector(offset_of_next_item, table, field, table_offset,
                  vtable->fields);
    } break;
    case reflection::BaseType::Vector64: {
      offset_field_comment.default_value = "(vector64)";
      regions.push_back(MakeBinaryRegion(field_offset, length, region_type, 0,
                                         offset_of_next_item,
                                         offset_field_comment));
      BuildVector(offset_of_next_item, table, field, table_offset,
                  vtable->fields);
    } break;

@ -768,8 +825,7 @@ void BinaryAnnotator::BuildTable(const uint64_t table_offset,
      offset_field_comment.default_value =
          "(union of type `" + enum_type + "`)";

      regions.push_back(MakeBinaryRegion(field_offset, sizeof(uint32_t),
                                         BinaryRegionType::UOffset, 0,
      regions.push_back(MakeBinaryRegion(field_offset, length, region_type, 0,
                                         union_offset, offset_field_comment));

    } break;

@ -986,7 +1042,28 @@ void BinaryAnnotator::BuildVector(
  BinaryRegionComment vector_length_comment;
  vector_length_comment.type = BinaryRegionCommentType::VectorLength;

  const auto vector_length = ReadScalar<uint32_t>(vector_offset);
  const bool is_64_bit_vector =
      field->type()->base_type() == reflection::BaseType::Vector64;

  flatbuffers::Optional<uint64_t> vector_length;
  uint32_t vector_length_size_type = 0;
  BinaryRegionType region_type = BinaryRegionType::Uint32;
  BinarySectionType section_type = BinarySectionType::Vector;

  if (is_64_bit_vector) {
    auto v = ReadScalar<uint64_t>(vector_offset);
    if (v.has_value()) { vector_length = v.value(); }
    vector_length_size_type = sizeof(uint64_t);
    region_type = BinaryRegionType::Uint64;
    section_type = BinarySectionType::Vector64;
  } else {
    auto v = ReadScalar<uint32_t>(vector_offset);
    if (v.has_value()) { vector_length = v.value(); }
    vector_length_size_type = sizeof(uint32_t);
    region_type = BinaryRegionType::Uint32;
    section_type = BinarySectionType::Vector;
  }

  if (!vector_length.has_value()) {
    const uint64_t remaining = RemainingBytes(vector_offset);
    SetError(vector_length_comment, BinaryRegionStatus::ERROR_INCOMPLETE_BINARY,

@ -1006,7 +1083,7 @@ void BinaryAnnotator::BuildVector(
  // Validate there are enough bytes left in the binary to process all the
  // items.
  const uint64_t last_item_offset =
      vector_offset + sizeof(uint32_t) +
      vector_offset + vector_length_size_type +
      vector_length.value() * GetElementSize(field);

  if (!IsValidOffset(last_item_offset - 1)) {

@ -1016,20 +1093,18 @@ void BinaryAnnotator::BuildVector(
        MakeSingleRegionBinarySection(
            std::string(table->name()->c_str()) + "." + field->name()->c_str(),
            BinarySectionType::Vector,
            MakeBinaryRegion(vector_offset, sizeof(uint32_t),
                             BinaryRegionType::Uint32, 0, 0,
                             vector_length_comment)));
            MakeBinaryRegion(vector_offset, vector_length_size_type,
                             region_type, 0, 0, vector_length_comment)));

    return;
  }

  std::vector<BinaryRegion> regions;

  regions.push_back(MakeBinaryRegion(vector_offset, sizeof(uint32_t),
                                     BinaryRegionType::Uint32, 0, 0,
                                     vector_length_comment));
  regions.push_back(MakeBinaryRegion(vector_offset, vector_length_size_type,
                                     region_type, 0, 0, vector_length_comment));
  // Consume the vector length offset.
  uint64_t offset = vector_offset + sizeof(uint32_t);
  uint64_t offset = vector_offset + vector_length_size_type;

  switch (field->type()->element()) {
    case reflection::BaseType::Obj: {

@ -1302,7 +1377,7 @@ void BinaryAnnotator::BuildVector(
  AddSection(vector_offset,
             MakeBinarySection(std::string(table->name()->c_str()) + "." +
                                   field->name()->c_str(),
                               BinarySectionType::Vector, std::move(regions)));
                               section_type, std::move(regions)));
}

std::string BinaryAnnotator::BuildUnion(const uint64_t union_offset,
|
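For orientation, the annotator change above comes down to the width of the vector's length prefix: a Vector64 stores its element count as a uint64 where a classic vector uses a uint32. A minimal reader-side sketch, not part of the patch, assuming only the existing flatbuffers::ReadScalar helper from flatbuffers/base.h:

#include <cstdint>
#include "flatbuffers/base.h"

// Element count of the vector whose bytes start at `vector_start`.
// Regular vectors use a 4-byte length prefix, Vector64 uses an 8-byte prefix.
inline uint64_t ReadVectorLength(const uint8_t *vector_start,
                                 bool is_64_bit_vector) {
  return is_64_bit_vector
             ? flatbuffers::ReadScalar<uint64_t>(vector_start)
             : static_cast<uint64_t>(
                   flatbuffers::ReadScalar<uint32_t>(vector_start));
}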
@ -48,6 +48,7 @@ enum class BinaryRegionType {
Float = 15,
Double = 16,
UType = 17,
UOffset64 = 18,
};

template<typename T>
@ -179,6 +180,7 @@ enum class BinarySectionType {
Vector = 7,
Union = 8,
Padding = 9,
Vector64 = 10,
};

// A section of the binary that is grouped together in some logical manner, and
@ -216,6 +218,7 @@ inline static BinaryRegionType GetRegionType(reflection::BaseType base_type) {
inline static std::string ToString(const BinaryRegionType type) {
switch (type) {
case BinaryRegionType::UOffset: return "UOffset32";
case BinaryRegionType::UOffset64: return "UOffset64";
case BinaryRegionType::SOffset: return "SOffset32";
case BinaryRegionType::VOffset: return "VOffset16";
case BinaryRegionType::Bool: return "bool";
@ -242,12 +245,14 @@ class BinaryAnnotator {
explicit BinaryAnnotator(const uint8_t *const bfbs,
const uint64_t bfbs_length,
const uint8_t *const binary,
const uint64_t binary_length)
const uint64_t binary_length,
const bool is_size_prefixed)
: bfbs_(bfbs),
bfbs_length_(bfbs_length),
schema_(reflection::GetSchema(bfbs)),
binary_(binary),
binary_length_(binary_length) {}
binary_length_(binary_length),
is_size_prefixed_(is_size_prefixed) {}

std::map<uint64_t, BinarySection> Annotate();

@ -387,6 +392,7 @@ class BinaryAnnotator {
// The binary data itself.
const uint8_t *binary_;
const uint64_t binary_length_;
const bool is_size_prefixed_;

// Map of binary offset to vtables, to dedupe vtables.
std::map<uint64_t, std::list<VTable>> vtables_;

@ -252,10 +252,9 @@ const static FlatCOption flatc_options[] = {
"Currently this is required to generate private types in Rust" },
{ "", "python-no-type-prefix-suffix", "",
"Skip emission of Python functions that are prefixed with typenames" },
{ "", "python-typing", "",
"Generate Python type annotations" },
{ "", "python-typing", "", "Generate Python type annotations" },
{ "", "file-names-only", "",
"Print out generated file names without writing to the files"},
"Print out generated file names without writing to the files" },
};

auto cmp = [](FlatCOption a, FlatCOption b) { return a.long_opt < b.long_opt; };
@ -394,9 +393,11 @@ void FlatCompiler::AnnotateBinaries(const uint8_t *binary_schema,
const uint8_t *binary =
reinterpret_cast<const uint8_t *>(binary_contents.c_str());
const size_t binary_size = binary_contents.size();
const bool is_size_prefixed = options.opts.size_prefixed;

flatbuffers::BinaryAnnotator binary_annotator(
binary_schema, binary_schema_size, binary, binary_size);
binary_schema, binary_schema_size, binary, binary_size,
is_size_prefixed);

auto annotations = binary_annotator.Annotate();

@ -663,7 +664,7 @@ FlatCOptions FlatCompiler::ParseFromCommandLineArguments(int argc,
} else if (arg == "--annotate") {
if (++argi >= argc) Error("missing path following: " + arg, true);
options.annotate_schema = flatbuffers::PosixPath(argv[argi]);
} else if(arg == "--file-names-only") {
} else if (arg == "--file-names-only") {
// TODO (khhn): Provide 2 implementation
options.file_names_only = true;
} else {
@ -77,8 +77,7 @@ static std::string GenIncludeGuard(const std::string &file_name,
static bool IsVectorOfPointers(const FieldDef &field) {
const auto &type = field.value.type;
const auto &vector_type = type.VectorType();
return type.base_type == BASE_TYPE_VECTOR &&
vector_type.base_type == BASE_TYPE_STRUCT &&
return IsVector(type) && vector_type.base_type == BASE_TYPE_STRUCT &&
!vector_type.struct_def->fixed && !field.native_inline;
}

@ -107,6 +106,21 @@ struct IDLOptionsCpp : public IDLOptions {
: IDLOptions(opts), g_cpp_std(CPP_STD_11), g_only_fixed_enums(true) {}
};

// Iterates over all the fields of the object first by Offset type (Offset64
// before Offset32) and then by definition order.
static void ForAllFieldsOrderedByOffset(
const StructDef &object, std::function<void(const FieldDef *field)> func) {
// Loop over all the fields and call the func on all offset64 fields.
for (const FieldDef *field_def : object.fields.vec) {
if (field_def->offset64) { func(field_def); }
}
// Loop over all the fields a second time and call the func on all offset
// fields.
for (const FieldDef *field_def : object.fields.vec) {
if (!field_def->offset64) { func(field_def); }
}
}
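The two-pass order matters because FlatBuffers are written back to front: anything reached through an Offset64 has to end up in the tail region of the finished buffer, so it must be serialized before any 32-bit offsets are created (the builder asserts if this is violated). A rough hand-written equivalent of the ordering the generated code follows, assuming the FlatBufferBuilder64 and CreateVector64 names introduced by this change:

#include <vector>
#include "flatbuffers/flatbuffers.h"

void BuildExample() {
  flatbuffers::FlatBufferBuilder64 builder;

  // 1) 64-bit addressed data goes into the buffer first ...
  std::vector<uint8_t> big_data(1 << 20);
  auto big_vector = builder.CreateVector64(big_data);

  // 2) ... then the usual 32-bit offset data (strings, vectors, tables).
  auto near_string = builder.CreateString("hello");
  (void)big_vector;
  (void)near_string;
}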
class CppGenerator : public BaseGenerator {
public:
CppGenerator(const Parser &parser, const std::string &path,
@ -273,6 +287,25 @@ class CppGenerator : public BaseGenerator {
}
}

void MarkIf64BitBuilderIsNeeded() {
if (needs_64_bit_builder_) { return; }
for (auto t : parser_.structs_.vec) {
if (t == nullptr) continue;
for (auto f : t->fields.vec) {
if (f == nullptr) continue;
if (f->offset64) {
needs_64_bit_builder_ = true;
break;
}
}
}
}

std::string GetBuilder() {
return std::string("::flatbuffers::FlatBufferBuilder") +
(needs_64_bit_builder_ ? "64" : "");
}

void GenExtraIncludes() {
for (const std::string &cpp_include : opts_.cpp_includes) {
code_ += "#include \"" + cpp_include + "\"";
@ -396,6 +429,9 @@ class CppGenerator : public BaseGenerator {
|
||||
// Iterate through all definitions we haven't generate code for (enums,
|
||||
// structs, and tables) and output them to a single file.
|
||||
bool generate() {
|
||||
// Check if we require a 64-bit flatbuffer builder.
|
||||
MarkIf64BitBuilderIsNeeded();
|
||||
|
||||
code_.Clear();
|
||||
code_ += "// " + std::string(FlatBuffersGeneratedWarning()) + "\n\n";
|
||||
|
||||
@ -530,6 +566,8 @@ class CppGenerator : public BaseGenerator {
|
||||
code_.SetValue("STRUCT_NAME", name);
|
||||
code_.SetValue("CPP_NAME", cpp_name);
|
||||
code_.SetValue("NULLABLE_EXT", NullableExtension());
|
||||
code_.SetValue(
|
||||
"SIZE_T", needs_64_bit_builder_ ? ",::flatbuffers::uoffset64_t" : "");
|
||||
|
||||
// The root datatype accessor:
|
||||
code_ += "inline \\";
|
||||
@ -546,7 +584,8 @@ class CppGenerator : public BaseGenerator {
|
||||
"*{{NULLABLE_EXT}}GetSizePrefixed{{STRUCT_NAME}}(const void "
|
||||
"*buf) {";
|
||||
code_ +=
|
||||
" return ::flatbuffers::GetSizePrefixedRoot<{{CPP_NAME}}>(buf);";
|
||||
" return "
|
||||
"::flatbuffers::GetSizePrefixedRoot<{{CPP_NAME}}{{SIZE_T}}>(buf);";
|
||||
code_ += "}";
|
||||
code_ += "";
|
||||
|
||||
@ -565,7 +604,8 @@ class CppGenerator : public BaseGenerator {
|
||||
"*buf) {";
|
||||
code_ +=
|
||||
" return "
|
||||
"::flatbuffers::GetMutableSizePrefixedRoot<{{CPP_NAME}}>(buf);";
|
||||
"::flatbuffers::GetMutableSizePrefixedRoot<{{CPP_NAME}}{{SIZE_T}}>("
|
||||
"buf);";
|
||||
code_ += "}";
|
||||
code_ += "";
|
||||
}
|
||||
@ -612,7 +652,8 @@ class CppGenerator : public BaseGenerator {
|
||||
code_ += "inline bool VerifySizePrefixed{{STRUCT_NAME}}Buffer(";
|
||||
code_ += " ::flatbuffers::Verifier &verifier) {";
|
||||
code_ +=
|
||||
" return verifier.VerifySizePrefixedBuffer<{{CPP_NAME}}>({{ID}});";
|
||||
" return "
|
||||
"verifier.VerifySizePrefixedBuffer<{{CPP_NAME}}{{SIZE_T}}>({{ID}});";
|
||||
code_ += "}";
|
||||
code_ += "";
|
||||
|
||||
@ -626,7 +667,7 @@ class CppGenerator : public BaseGenerator {
|
||||
|
||||
// Finish a buffer with a given root object:
|
||||
code_ += "inline void Finish{{STRUCT_NAME}}Buffer(";
|
||||
code_ += " ::flatbuffers::FlatBufferBuilder &fbb,";
|
||||
code_ += " " + GetBuilder() + " &fbb,";
|
||||
code_ += " ::flatbuffers::Offset<{{CPP_NAME}}> root) {";
|
||||
if (parser_.file_identifier_.length())
|
||||
code_ += " fbb.Finish(root, {{STRUCT_NAME}}Identifier());";
|
||||
@ -636,7 +677,7 @@ class CppGenerator : public BaseGenerator {
|
||||
code_ += "";
|
||||
|
||||
code_ += "inline void FinishSizePrefixed{{STRUCT_NAME}}Buffer(";
|
||||
code_ += " ::flatbuffers::FlatBufferBuilder &fbb,";
|
||||
code_ += " " + GetBuilder() + " &fbb,";
|
||||
code_ += " ::flatbuffers::Offset<{{CPP_NAME}}> root) {";
|
||||
if (parser_.file_identifier_.length())
|
||||
code_ += " fbb.FinishSizePrefixed(root, {{STRUCT_NAME}}Identifier());";
|
||||
@ -696,6 +737,7 @@ class CppGenerator : public BaseGenerator {
|
||||
|
||||
const IDLOptionsCpp opts_;
|
||||
const TypedFloatConstantGenerator float_const_gen_;
|
||||
bool needs_64_bit_builder_ = false;
|
||||
|
||||
const Namespace *CurrentNameSpace() const { return cur_name_space_; }
|
||||
|
||||
@ -747,10 +789,14 @@ class CppGenerator : public BaseGenerator {
|
||||
case BASE_TYPE_STRING: {
|
||||
return "::flatbuffers::String";
|
||||
}
|
||||
case BASE_TYPE_VECTOR64:
|
||||
case BASE_TYPE_VECTOR: {
|
||||
const auto type_name = GenTypeWire(
|
||||
type.VectorType(), "", VectorElementUserFacing(type.VectorType()));
|
||||
return "::flatbuffers::Vector<" + type_name + ">";
|
||||
return "::flatbuffers::Vector" +
|
||||
std::string((type.base_type == BASE_TYPE_VECTOR64) ? "64<"
|
||||
: "<") +
|
||||
type_name + ">";
|
||||
}
|
||||
case BASE_TYPE_STRUCT: {
|
||||
return WrapInNameSpace(*type.struct_def);
|
||||
@ -766,13 +812,15 @@ class CppGenerator : public BaseGenerator {
|
||||
// Return a C++ type for any type (scalar/pointer) specifically for
|
||||
// building a flatbuffer.
|
||||
std::string GenTypeWire(const Type &type, const char *postfix,
|
||||
bool user_facing_type) const {
|
||||
bool user_facing_type,
|
||||
bool _64_bit_offset = false) const {
|
||||
if (IsScalar(type.base_type)) {
|
||||
return GenTypeBasic(type, user_facing_type) + postfix;
|
||||
} else if (IsStruct(type)) {
|
||||
return "const " + GenTypePointer(type) + " *";
|
||||
} else {
|
||||
return "::flatbuffers::Offset<" + GenTypePointer(type) + ">" + postfix;
|
||||
return "::flatbuffers::Offset" + std::string(_64_bit_offset ? "64" : "") +
|
||||
"<" + GenTypePointer(type) + ">" + postfix;
|
||||
}
|
||||
}
|
||||
|
||||
@ -858,6 +906,7 @@ class CppGenerator : public BaseGenerator {
|
||||
case BASE_TYPE_STRING: {
|
||||
return NativeString(&field);
|
||||
}
|
||||
case BASE_TYPE_VECTOR64:
|
||||
case BASE_TYPE_VECTOR: {
|
||||
const auto type_name = GenTypeNative(type.VectorType(), true, field);
|
||||
if (type.struct_def &&
|
||||
@ -866,8 +915,9 @@ class CppGenerator : public BaseGenerator {
|
||||
type.struct_def->attributes.Lookup("native_custom_alloc");
|
||||
return "std::vector<" + type_name + "," +
|
||||
native_custom_alloc->constant + "<" + type_name + ">>";
|
||||
} else
|
||||
} else {
|
||||
return "std::vector<" + type_name + ">";
|
||||
}
|
||||
}
|
||||
case BASE_TYPE_STRUCT: {
|
||||
auto type_name = WrapInNameSpace(*type.struct_def);
|
||||
@ -1015,8 +1065,8 @@ class CppGenerator : public BaseGenerator {
|
||||
|
||||
std::string UnionPackSignature(const EnumDef &enum_def, bool inclass) {
|
||||
return "::flatbuffers::Offset<void> " +
|
||||
(inclass ? "" : Name(enum_def) + "Union::") +
|
||||
"Pack(::flatbuffers::FlatBufferBuilder &_fbb, " +
|
||||
(inclass ? "" : Name(enum_def) + "Union::") + "Pack(" +
|
||||
GetBuilder() + " &_fbb, " +
|
||||
"const ::flatbuffers::rehasher_function_t *_rehasher" +
|
||||
(inclass ? " = nullptr" : "") + ") const";
|
||||
}
|
||||
@ -1024,8 +1074,7 @@ class CppGenerator : public BaseGenerator {
|
||||
std::string TableCreateSignature(const StructDef &struct_def, bool predecl,
|
||||
const IDLOptions &opts) {
|
||||
return "::flatbuffers::Offset<" + Name(struct_def) + "> Create" +
|
||||
Name(struct_def) +
|
||||
"(::flatbuffers::FlatBufferBuilder &_fbb, const " +
|
||||
Name(struct_def) + "(" + GetBuilder() + " &_fbb, const " +
|
||||
NativeName(Name(struct_def), &struct_def, opts) +
|
||||
" *_o, const ::flatbuffers::rehasher_function_t *_rehasher" +
|
||||
(predecl ? " = nullptr" : "") + ")";
|
||||
@ -1035,7 +1084,7 @@ class CppGenerator : public BaseGenerator {
|
||||
const IDLOptions &opts) {
|
||||
return std::string(inclass ? "static " : "") + "::flatbuffers::Offset<" +
|
||||
Name(struct_def) + "> " + (inclass ? "" : Name(struct_def) + "::") +
|
||||
"Pack(::flatbuffers::FlatBufferBuilder &_fbb, " + "const " +
|
||||
"Pack(" + GetBuilder() + " &_fbb, " + "const " +
|
||||
NativeName(Name(struct_def), &struct_def, opts) + "* _o, " +
|
||||
"const ::flatbuffers::rehasher_function_t *_rehasher" +
|
||||
(inclass ? " = nullptr" : "") + ")";
|
||||
@ -1791,7 +1840,8 @@ class CppGenerator : public BaseGenerator {
|
||||
if (IsStruct(vtype)) {
|
||||
type = WrapInNameSpace(*vtype.struct_def);
|
||||
} else {
|
||||
type = GenTypeWire(vtype, "", VectorElementUserFacing(vtype));
|
||||
type = GenTypeWire(vtype, "", VectorElementUserFacing(vtype),
|
||||
field.offset64);
|
||||
}
|
||||
if (TypeHasKey(vtype)) {
|
||||
code_.SetValue("PARAM_TYPE", "std::vector<" + type + "> *");
|
||||
@ -1805,7 +1855,8 @@ class CppGenerator : public BaseGenerator {
|
||||
if (field.IsScalarOptional())
|
||||
code_.SetValue("PARAM_TYPE", GenOptionalDecl(type) + " ");
|
||||
else
|
||||
code_.SetValue("PARAM_TYPE", GenTypeWire(type, " ", true));
|
||||
code_.SetValue("PARAM_TYPE",
|
||||
GenTypeWire(type, " ", true, field.offset64));
|
||||
}
|
||||
code_ += "{{PRE}}{{PARAM_TYPE}}{{PARAM_NAME}} = {{PARAM_VALUE}}\\";
|
||||
}
|
||||
@ -1814,7 +1865,7 @@ class CppGenerator : public BaseGenerator {
|
||||
void GenMember(const FieldDef &field) {
|
||||
if (!field.deprecated && // Deprecated fields won't be accessible.
|
||||
field.value.type.base_type != BASE_TYPE_UTYPE &&
|
||||
(field.value.type.base_type != BASE_TYPE_VECTOR ||
|
||||
(!IsVector(field.value.type) ||
|
||||
field.value.type.element != BASE_TYPE_UTYPE)) {
|
||||
auto type = GenTypeNative(field.value.type, false, field);
|
||||
auto cpp_type = field.attributes.Lookup("cpp_type");
|
||||
@ -1918,7 +1969,7 @@ class CppGenerator : public BaseGenerator {
|
||||
Name(field) + "(" + native_default->constant + ")";
|
||||
}
|
||||
}
|
||||
} else if (cpp_type && field.value.type.base_type != BASE_TYPE_VECTOR) {
|
||||
} else if (cpp_type && !IsVector(field.value.type)) {
|
||||
if (!initializer_list.empty()) { initializer_list += ",\n "; }
|
||||
initializer_list += Name(field) + "(0)";
|
||||
}
|
||||
@ -2063,7 +2114,7 @@ class CppGenerator : public BaseGenerator {
|
||||
const auto rhs_accessor = "rhs." + accessor;
|
||||
if (!field.deprecated && // Deprecated fields won't be accessible.
|
||||
field.value.type.base_type != BASE_TYPE_UTYPE &&
|
||||
(field.value.type.base_type != BASE_TYPE_VECTOR ||
|
||||
(!IsVector(field.value.type) ||
|
||||
field.value.type.element != BASE_TYPE_UTYPE)) {
|
||||
if (!compare_op.empty()) { compare_op += " &&\n "; }
|
||||
if (struct_def.fixed || field.native_inline ||
|
||||
@ -2195,7 +2246,10 @@ class CppGenerator : public BaseGenerator {
|
||||
"{{PRE}}VerifyField{{REQUIRED}}<{{SIZE}}>(verifier, "
|
||||
"{{OFFSET}}, {{ALIGN}})\\";
|
||||
} else {
|
||||
code_ += "{{PRE}}VerifyOffset{{REQUIRED}}(verifier, {{OFFSET}})\\";
|
||||
code_.SetValue("OFFSET_SIZE", field.offset64 ? "64" : "");
|
||||
code_ +=
|
||||
"{{PRE}}VerifyOffset{{OFFSET_SIZE}}{{REQUIRED}}(verifier, "
|
||||
"{{OFFSET}})\\";
|
||||
}
|
||||
|
||||
switch (field.value.type.base_type) {
|
||||
@ -2217,6 +2271,7 @@ class CppGenerator : public BaseGenerator {
|
||||
code_ += "{{PRE}}verifier.VerifyString({{NAME}}())\\";
|
||||
break;
|
||||
}
|
||||
case BASE_TYPE_VECTOR64:
|
||||
case BASE_TYPE_VECTOR: {
|
||||
code_ += "{{PRE}}verifier.VerifyVector({{NAME}}())\\";
|
||||
|
||||
@ -2468,12 +2523,18 @@ class CppGenerator : public BaseGenerator {
|
||||
if (!field.IsScalarOptional()) {
|
||||
const bool is_scalar = IsScalar(type.base_type);
|
||||
std::string accessor;
|
||||
if (is_scalar)
|
||||
std::string offset_size = "";
|
||||
if (is_scalar) {
|
||||
accessor = "GetField<";
|
||||
else if (IsStruct(type))
|
||||
} else if (IsStruct(type)) {
|
||||
accessor = "GetStruct<";
|
||||
else
|
||||
accessor = "GetPointer<";
|
||||
} else {
|
||||
if (field.offset64) {
|
||||
accessor = "GetPointer64<";
|
||||
} else {
|
||||
accessor = "GetPointer<";
|
||||
}
|
||||
}
|
||||
auto offset_type = GenTypeGet(type, "", "const ", " *", false);
|
||||
auto call = accessor + offset_type + ">(" + offset_str;
|
||||
// Default value as second arg for non-pointer types.
|
||||
@ -2633,7 +2694,7 @@ class CppGenerator : public BaseGenerator {
|
||||
|
||||
auto offset_str = GenFieldOffsetName(field);
|
||||
if (is_scalar) {
|
||||
const auto wire_type = GenTypeWire(type, "", false);
|
||||
const auto wire_type = GenTypeWire(type, "", false, field.offset64);
|
||||
code_.SetValue("SET_FN", "SetField<" + wire_type + ">");
|
||||
code_.SetValue("OFFSET_NAME", offset_str);
|
||||
code_.SetValue("FIELD_TYPE", GenTypeBasic(type, true));
|
||||
@ -2665,7 +2726,11 @@ class CppGenerator : public BaseGenerator {
|
||||
} else {
|
||||
auto postptr = " *" + NullableExtension();
|
||||
auto wire_type = GenTypeGet(type, " ", "", postptr.c_str(), true);
|
||||
std::string accessor = IsStruct(type) ? "GetStruct<" : "GetPointer<";
|
||||
const std::string accessor = [&]() {
|
||||
if (IsStruct(type)) { return "GetStruct<"; }
|
||||
if (field.offset64) { return "GetPointer64<"; }
|
||||
return "GetPointer<";
|
||||
}();
|
||||
auto underlying = accessor + wire_type + ">(" + offset_str + ")";
|
||||
code_.SetValue("FIELD_TYPE", wire_type);
|
||||
code_.SetValue("FIELD_VALUE", GenUnderlyingCast(field, true, underlying));
|
||||
@ -2859,9 +2924,9 @@ class CppGenerator : public BaseGenerator {
|
||||
// Generate code to do force_align for the vector.
|
||||
if (align > 1) {
|
||||
const auto vtype = field.value.type.VectorType();
|
||||
const std::string &type = IsStruct(vtype)
|
||||
? WrapInNameSpace(*vtype.struct_def)
|
||||
: GenTypeWire(vtype, "", false);
|
||||
const std::string &type =
|
||||
IsStruct(vtype) ? WrapInNameSpace(*vtype.struct_def)
|
||||
: GenTypeWire(vtype, "", false, field.offset64);
|
||||
return "_fbb.ForceVectorAlignment(" + field_size + ", sizeof(" + type +
|
||||
"), " + std::to_string(static_cast<long long>(align)) + ");";
|
||||
}
|
||||
@ -2874,7 +2939,7 @@ class CppGenerator : public BaseGenerator {
|
||||
// Generate a builder struct:
|
||||
code_ += "struct {{STRUCT_NAME}}Builder {";
|
||||
code_ += " typedef {{STRUCT_NAME}} Table;";
|
||||
code_ += " ::flatbuffers::FlatBufferBuilder &fbb_;";
|
||||
code_ += " " + GetBuilder() + " &fbb_;";
|
||||
code_ += " ::flatbuffers::uoffset_t start_;";
|
||||
|
||||
bool has_string_or_vector_fields = false;
|
||||
@ -2897,12 +2962,14 @@ class CppGenerator : public BaseGenerator {
|
||||
// fbb_.AddElement<type>(offset, name, default);
|
||||
// }
|
||||
code_.SetValue("FIELD_NAME", Name(field));
|
||||
code_.SetValue("FIELD_TYPE", GenTypeWire(field.value.type, " ", true));
|
||||
code_.SetValue("FIELD_TYPE",
|
||||
GenTypeWire(field.value.type, " ", true, field.offset64));
|
||||
code_.SetValue("ADD_OFFSET", Name(struct_def) + "::" + offset);
|
||||
code_.SetValue("ADD_NAME", name);
|
||||
code_.SetValue("ADD_VALUE", value);
|
||||
if (is_scalar) {
|
||||
const auto type = GenTypeWire(field.value.type, "", false);
|
||||
const auto type =
|
||||
GenTypeWire(field.value.type, "", false, field.offset64);
|
||||
code_.SetValue("ADD_FN", "AddElement<" + type + ">");
|
||||
} else if (IsStruct(field.value.type)) {
|
||||
code_.SetValue("ADD_FN", "AddStruct");
|
||||
@ -2921,9 +2988,9 @@ class CppGenerator : public BaseGenerator {
|
||||
}
|
||||
|
||||
// Builder constructor
|
||||
code_ +=
|
||||
" explicit {{STRUCT_NAME}}Builder(::flatbuffers::FlatBufferBuilder "
|
||||
"&_fbb)";
|
||||
code_ += " explicit {{STRUCT_NAME}}Builder(" + GetBuilder() +
|
||||
" "
|
||||
"&_fbb)";
|
||||
code_ += " : fbb_(_fbb) {";
|
||||
code_ += " start_ = fbb_.StartTable();";
|
||||
code_ += " }";
|
||||
@ -2950,7 +3017,7 @@ class CppGenerator : public BaseGenerator {
|
||||
code_ +=
|
||||
"inline ::flatbuffers::Offset<{{STRUCT_NAME}}> "
|
||||
"Create{{STRUCT_NAME}}(";
|
||||
code_ += " ::flatbuffers::FlatBufferBuilder &_fbb\\";
|
||||
code_ += " " + GetBuilder() + " &_fbb\\";
|
||||
for (const auto &field : struct_def.fields.vec) {
|
||||
if (!field->deprecated) { GenParam(*field, false, ",\n "); }
|
||||
}
|
||||
@ -2988,7 +3055,7 @@ class CppGenerator : public BaseGenerator {
|
||||
code_ +=
|
||||
"inline ::flatbuffers::Offset<{{STRUCT_NAME}}> "
|
||||
"Create{{STRUCT_NAME}}Direct(";
|
||||
code_ += " ::flatbuffers::FlatBufferBuilder &_fbb\\";
|
||||
code_ += " " + GetBuilder() + " &_fbb\\";
|
||||
for (const auto &field : struct_def.fields.vec) {
|
||||
if (!field->deprecated) { GenParam(*field, true, ",\n "); }
|
||||
}
|
||||
@ -2997,54 +3064,85 @@ class CppGenerator : public BaseGenerator {
|
||||
struct_def.defined_namespace->GetFullyQualifiedName("Create");
|
||||
code_.SetValue("CREATE_NAME", TranslateNameSpace(qualified_create_name));
|
||||
code_ += ") {";
|
||||
for (const auto &field : struct_def.fields.vec) {
|
||||
if (!field->deprecated) {
|
||||
code_.SetValue("FIELD_NAME", Name(*field));
|
||||
if (IsString(field->value.type)) {
|
||||
if (!field->shared) {
|
||||
code_.SetValue("CREATE_STRING", "CreateString");
|
||||
} else {
|
||||
code_.SetValue("CREATE_STRING", "CreateSharedString");
|
||||
}
|
||||
code_ +=
|
||||
" auto {{FIELD_NAME}}__ = {{FIELD_NAME}} ? "
|
||||
"_fbb.{{CREATE_STRING}}({{FIELD_NAME}}) : 0;";
|
||||
} else if (IsVector(field->value.type)) {
|
||||
const std::string force_align_code =
|
||||
GenVectorForceAlign(*field, Name(*field) + "->size()");
|
||||
if (!force_align_code.empty()) {
|
||||
code_ += " if ({{FIELD_NAME}}) { " + force_align_code + " }";
|
||||
}
|
||||
code_ += " auto {{FIELD_NAME}}__ = {{FIELD_NAME}} ? \\";
|
||||
const auto vtype = field->value.type.VectorType();
|
||||
const auto has_key = TypeHasKey(vtype);
|
||||
if (IsStruct(vtype)) {
|
||||
const auto type = WrapInNameSpace(*vtype.struct_def);
|
||||
code_ += (has_key ? "_fbb.CreateVectorOfSortedStructs<"
|
||||
: "_fbb.CreateVectorOfStructs<") +
|
||||
type + ">\\";
|
||||
} else if (has_key) {
|
||||
const auto type = WrapInNameSpace(*vtype.struct_def);
|
||||
code_ += "_fbb.CreateVectorOfSortedTables<" + type + ">\\";
|
||||
} else {
|
||||
const auto type =
|
||||
GenTypeWire(vtype, "", VectorElementUserFacing(vtype));
|
||||
code_ += "_fbb.CreateVector<" + type + ">\\";
|
||||
}
|
||||
code_ +=
|
||||
has_key ? "({{FIELD_NAME}}) : 0;" : "(*{{FIELD_NAME}}) : 0;";
|
||||
// Offset64 bit fields need to be added to the buffer first, so here we
|
||||
// loop over the fields in order of their offset size, followed by their
|
||||
// definition order. Otherwise the emitted code might add a Offset
|
||||
// followed by an Offset64 which would trigger an assertion.
|
||||
|
||||
// TODO(derekbailey): maybe optimize for the case where there is no
|
||||
// 64offsets in the whole schema?
|
||||
ForAllFieldsOrderedByOffset(struct_def, [&](const FieldDef *field) {
|
||||
if (field->deprecated) { return; }
|
||||
code_.SetValue("FIELD_NAME", Name(*field));
|
||||
if (IsString(field->value.type)) {
|
||||
if (!field->shared) {
|
||||
code_.SetValue(
|
||||
"CREATE_STRING",
|
||||
"CreateString" + std::string(field->offset64
|
||||
? "<::flatbuffers::Offset64>"
|
||||
: ""));
|
||||
} else {
|
||||
code_.SetValue("CREATE_STRING", "CreateSharedString");
|
||||
}
|
||||
code_ +=
|
||||
" auto {{FIELD_NAME}}__ = {{FIELD_NAME}} ? "
|
||||
"_fbb.{{CREATE_STRING}}({{FIELD_NAME}}) : 0;";
|
||||
} else if (IsVector(field->value.type)) {
|
||||
const std::string force_align_code =
|
||||
GenVectorForceAlign(*field, Name(*field) + "->size()");
|
||||
if (!force_align_code.empty()) {
|
||||
code_ += " if ({{FIELD_NAME}}) { " + force_align_code + " }";
|
||||
}
|
||||
code_ += " auto {{FIELD_NAME}}__ = {{FIELD_NAME}} ? \\";
|
||||
const auto vtype = field->value.type.VectorType();
|
||||
const auto has_key = TypeHasKey(vtype);
|
||||
if (IsStruct(vtype)) {
|
||||
const std::string type = WrapInNameSpace(*vtype.struct_def);
|
||||
if (has_key) {
|
||||
code_ += "_fbb.CreateVectorOfSortedStructs<" + type + ">\\";
|
||||
} else {
|
||||
// If the field uses 64-bit addressing, create a 64-bit vector.
|
||||
if (field->value.type.base_type == BASE_TYPE_VECTOR64) {
|
||||
code_ += "_fbb.CreateVectorOfStructs64\\";
|
||||
} else {
|
||||
code_ += "_fbb.CreateVectorOfStructs\\";
|
||||
if (field->offset64) {
|
||||
// This is normal 32-bit vector, with 64-bit addressing.
|
||||
code_ += "64<::flatbuffers::Vector>\\";
|
||||
} else {
|
||||
code_ += "<" + type + ">\\";
|
||||
}
|
||||
}
|
||||
}
|
||||
} else if (has_key) {
|
||||
const auto type = WrapInNameSpace(*vtype.struct_def);
|
||||
code_ += "_fbb.CreateVectorOfSortedTables<" + type + ">\\";
|
||||
} else {
|
||||
const auto type = GenTypeWire(
|
||||
vtype, "", VectorElementUserFacing(vtype), field->offset64);
|
||||
|
||||
if (field->value.type.base_type == BASE_TYPE_VECTOR64) {
|
||||
code_ += "_fbb.CreateVector64\\";
|
||||
} else {
|
||||
// If the field uses 64-bit addressing, create a 64-bit vector.
|
||||
code_.SetValue("64OFFSET", field->offset64 ? "64" : "");
|
||||
code_.SetValue("TYPE",
|
||||
field->offset64 ? "::flatbuffers::Vector" : type);
|
||||
|
||||
code_ += "_fbb.CreateVector{{64OFFSET}}<{{TYPE}}>\\";
|
||||
}
|
||||
}
|
||||
code_ += has_key ? "({{FIELD_NAME}}) : 0;" : "(*{{FIELD_NAME}}) : 0;";
|
||||
}
|
||||
}
|
||||
});
|
||||
code_ += " return {{CREATE_NAME}}{{STRUCT_NAME}}(";
|
||||
code_ += " _fbb\\";
|
||||
for (const auto &field : struct_def.fields.vec) {
|
||||
if (!field->deprecated) {
|
||||
code_.SetValue("FIELD_NAME", Name(*field));
|
||||
code_ += ",\n {{FIELD_NAME}}\\";
|
||||
if (IsString(field->value.type) || IsVector(field->value.type)) {
|
||||
code_ += "__\\";
|
||||
}
|
||||
if (field->deprecated) { continue; }
|
||||
code_.SetValue("FIELD_NAME", Name(*field));
|
||||
code_ += ",\n {{FIELD_NAME}}\\";
|
||||
if (IsString(field->value.type) || IsVector(field->value.type)) {
|
||||
code_ += "__\\";
|
||||
}
|
||||
}
|
||||
code_ += ");";
|
||||
@ -3115,6 +3213,7 @@ class CppGenerator : public BaseGenerator {
|
||||
const FieldDef *union_field) {
|
||||
std::string code;
|
||||
switch (field.value.type.base_type) {
|
||||
case BASE_TYPE_VECTOR64:
|
||||
case BASE_TYPE_VECTOR: {
|
||||
auto name = Name(field);
|
||||
if (field.value.type.element == BASE_TYPE_UTYPE) {
|
||||
@ -3151,8 +3250,11 @@ class CppGenerator : public BaseGenerator {
|
||||
? ".type"
|
||||
: (field.value.type.element == BASE_TYPE_UNION ? ".value"
|
||||
: "");
|
||||
|
||||
code += "for (::flatbuffers::uoffset_t _i = 0;";
|
||||
if (field.value.type.base_type == BASE_TYPE_VECTOR64) {
|
||||
code += "for (::flatbuffers::uoffset64_t _i = 0;";
|
||||
} else {
|
||||
code += "for (::flatbuffers::uoffset_t _i = 0;";
|
||||
}
|
||||
code += " _i < _e->size(); _i++) { ";
|
||||
auto cpp_type = field.attributes.Lookup("cpp_type");
|
||||
if (cpp_type) {
|
||||
@ -3265,8 +3367,7 @@ class CppGenerator : public BaseGenerator {
|
||||
} else {
|
||||
value += Name(field);
|
||||
}
|
||||
if (field.value.type.base_type != BASE_TYPE_VECTOR &&
|
||||
field.attributes.Lookup("cpp_type")) {
|
||||
if (!IsVector(field.value.type) && field.attributes.Lookup("cpp_type")) {
|
||||
auto type = GenTypeBasic(field.value.type, false);
|
||||
value =
|
||||
"_rehasher ? "
|
||||
@ -3282,7 +3383,10 @@ class CppGenerator : public BaseGenerator {
|
||||
// _fbb.CreateSharedString(_o->field)
|
||||
case BASE_TYPE_STRING: {
|
||||
if (!field.shared) {
|
||||
code += "_fbb.CreateString(";
|
||||
code +=
|
||||
"_fbb.CreateString" +
|
||||
std::string(field.offset64 ? "<::flatbuffers::Offset64>" : "") +
|
||||
"(";
|
||||
} else {
|
||||
code += "_fbb.CreateSharedString(";
|
||||
}
|
||||
@ -3309,6 +3413,7 @@ class CppGenerator : public BaseGenerator {
|
||||
// _fbb.CreateVector<Offset<T>>(_o->field.size() [&](size_t i) {
|
||||
// return CreateT(_fbb, _o->Get(i), rehasher);
|
||||
// });
|
||||
case BASE_TYPE_VECTOR64:
|
||||
case BASE_TYPE_VECTOR: {
|
||||
auto vector_type = field.value.type.VectorType();
|
||||
switch (vector_type.base_type) {
|
||||
@ -3347,7 +3452,16 @@ class CppGenerator : public BaseGenerator {
|
||||
}
|
||||
code += ")";
|
||||
} else {
|
||||
code += "_fbb.CreateVectorOfStructs";
|
||||
// If the field uses 64-bit addressing, create a 64-bit vector.
|
||||
if (field.value.type.base_type == BASE_TYPE_VECTOR64) {
|
||||
code += "_fbb.CreateVectorOfStructs64";
|
||||
} else {
|
||||
code += "_fbb.CreateVectorOfStructs";
|
||||
if (field.offset64) {
|
||||
// This is normal 32-bit vector, with 64-bit addressing.
|
||||
code += "64<::flatbuffers::Vector>";
|
||||
}
|
||||
}
|
||||
code += "(" + value + ")";
|
||||
}
|
||||
} else {
|
||||
@ -3413,7 +3527,17 @@ class CppGenerator : public BaseGenerator {
|
||||
code += "(__va->_" + value + "[i]" + GenPtrGet(field) + ")) : 0";
|
||||
code += "; }, &_va )";
|
||||
} else {
|
||||
code += "_fbb.CreateVector(" + value + ")";
|
||||
// If the field uses 64-bit addressing, create a 64-bit vector.
|
||||
if (field.value.type.base_type == BASE_TYPE_VECTOR64) {
|
||||
code += "_fbb.CreateVector64(" + value + ")";
|
||||
} else {
|
||||
code += "_fbb.CreateVector";
|
||||
if (field.offset64) {
|
||||
// This is normal 32-bit vector, with 64-bit addressing.
|
||||
code += "64<::flatbuffers::Vector>";
|
||||
}
|
||||
code += "(" + value + ")";
|
||||
}
|
||||
}
|
||||
break;
|
||||
}
|
||||
@ -3540,7 +3664,9 @@ class CppGenerator : public BaseGenerator {
|
||||
|
||||
code_ +=
|
||||
" struct _VectorArgs "
|
||||
"{ ::flatbuffers::FlatBufferBuilder *__fbb; "
|
||||
"{ " +
|
||||
GetBuilder() +
|
||||
" *__fbb; "
|
||||
"const " +
|
||||
NativeName(Name(struct_def), &struct_def, opts_) +
|
||||
"* __o; "
|
||||
|
@ -19,6 +19,7 @@

#include <algorithm>

#include "flatbuffers/base.h"
#include "flatbuffers/code_generator.h"
#include "flatbuffers/flatbuffers.h"
#include "flatbuffers/flexbuffers.h"
@ -101,13 +102,13 @@ struct JsonPrinter {

// Print a vector or an array of JSON values, comma seperated, wrapped in
// "[]".
template<typename Container>
const char *PrintContainer(PrintScalarTag, const Container &c, size_t size,
template<typename Container, typename SizeT = typename Container::size_type>
const char *PrintContainer(PrintScalarTag, const Container &c, SizeT size,
const Type &type, int indent, const uint8_t *) {
const auto elem_indent = indent + Indent();
text += '[';
AddNewLine();
for (uoffset_t i = 0; i < size; i++) {
for (SizeT i = 0; i < size; i++) {
if (i) {
AddComma();
AddNewLine();
@ -123,14 +124,14 @@ struct JsonPrinter {

// Print a vector or an array of JSON values, comma seperated, wrapped in
// "[]".
template<typename Container>
const char *PrintContainer(PrintPointerTag, const Container &c, size_t size,
template<typename Container, typename SizeT = typename Container::size_type>
const char *PrintContainer(PrintPointerTag, const Container &c, SizeT size,
const Type &type, int indent, const uint8_t *prev_val) {
const auto is_struct = IsStruct(type);
const auto elem_indent = indent + Indent();
text += '[';
AddNewLine();
for (uoffset_t i = 0; i < size; i++) {
for (SizeT i = 0; i < size; i++) {
if (i) {
AddComma();
AddNewLine();
@ -149,10 +150,10 @@ struct JsonPrinter {
return nullptr;
}

template<typename T>
template<typename T, typename SizeT = uoffset_t>
const char *PrintVector(const void *val, const Type &type, int indent,
const uint8_t *prev_val) {
typedef Vector<T> Container;
typedef Vector<T, SizeT> Container;
typedef typename PrintTag<typename Container::return_type>::type tag;
auto &vec = *reinterpret_cast<const Container *>(val);
return PrintContainer<Container>(tag(), vec, vec.size(), type, indent,
@ -161,8 +162,9 @@ struct JsonPrinter {

// Print an array a sequence of JSON values, comma separated, wrapped in "[]".
template<typename T>
const char *PrintArray(const void *val, size_t size, const Type &type,
int indent) {
const char *PrintArray(const void *val, uint16_t size, const Type &type,
int indent) {
typedef Array<T, 0xFFFF> Container;
typedef typename PrintTag<typename Container::return_type>::type tag;
auto &arr = *reinterpret_cast<const Container *>(val);
@ -240,7 +242,7 @@ struct JsonPrinter {
}

template<typename T> static T GetFieldDefault(const FieldDef &fd) {
T val;
T val{};
auto check = StringToNumber(fd.value.constant.c_str(), &val);
(void)check;
FLATBUFFERS_ASSERT(check);
@ -16,12 +16,15 @@

#include <algorithm>
#include <cmath>
#include <iostream>
#include <list>
#include <string>
#include <utility>

#include "flatbuffers/base.h"
#include "flatbuffers/buffer.h"
#include "flatbuffers/idl.h"
#include "flatbuffers/reflection_generated.h"
#include "flatbuffers/util.h"

namespace flatbuffers {
@ -42,7 +45,8 @@ static const double kPi = 3.14159265358979323846;

// The enums in the reflection schema should match the ones we use internally.
// Compare the last element to check if these go out of sync.
static_assert(BASE_TYPE_UNION == static_cast<BaseType>(reflection::Union),
static_assert(BASE_TYPE_VECTOR64 ==
static_cast<BaseType>(reflection::MaxBaseType - 1),
"enums don't match");

// Any parsing calls have to be wrapped in this macro, which automates
@ -124,6 +128,14 @@ CheckedError atot<Offset<void>>(const char *s, Parser &parser,
return NoError();
}

template<>
CheckedError atot<Offset64<void>>(const char *s, Parser &parser,
Offset64<void> *val) {
(void)parser;
*val = Offset64<void>(atoi(s));
return NoError();
}

template<typename T>
static T *LookupTableByName(const SymbolTable<T> &table,
const std::string &name,
@ -957,11 +969,11 @@ CheckedError Parser::ParseField(StructDef &struct_def) {
ECHECK(AddField(struct_def, name, type, &field));

if (typefield) {
// We preserve the relation between the typefield
// and field, so we can easily map it in the code
// generators.
typefield->sibling_union_field = field;
field->sibling_union_field = typefield;
// We preserve the relation between the typefield
// and field, so we can easily map it in the code
// generators.
typefield->sibling_union_field = field;
field->sibling_union_field = typefield;
}

if (token_ == '=') {
@ -1036,6 +1048,65 @@ CheckedError Parser::ParseField(StructDef &struct_def) {
}
}

if (field->attributes.Lookup("vector64") != nullptr) {
if (!IsVector(type)) {
return Error("`vector64` attribute can only be applied on vectors.");
}

// Upgrade the type to be a BASE_TYPE_VECTOR64, since the attributes are
// parsed after the type.
const BaseType element_base_type = type.element;
type = Type(BASE_TYPE_VECTOR64, type.struct_def, type.enum_def);
type.element = element_base_type;

// Since the field was already added to the parent object, update the type
// in place.
field->value.type = type;

// 64-bit vectors imply the offset64 attribute.
field->offset64 = true;
}

// Record that this field uses 64-bit offsets.
if (field->attributes.Lookup("offset64") != nullptr) {
// TODO(derekbailey): would be nice to have this be a recommendation or hint
// instead of a warning.
if (type.base_type == BASE_TYPE_VECTOR64) {
Warning("attribute `vector64` implies `offset64` and isn't required.");
}

field->offset64 = true;
}

// Check for common conditions with Offset64 fields.
if (field->offset64) {
// TODO(derekbailey): this is where we can disable string support for
// offset64, as that is not a hard requirement to have.
if (!IsString(type) && !IsVector(type)) {
return Error(
"only string and vectors can have `offset64` attribute applied");
}

// If this is a Vector, only scalar and scalar-like (structs) items are
// allowed.
// TODO(derekbailey): allow vector of strings, just require that the strings
// are Offset64<string>.
if (IsVector(type) &&
!((IsScalar(type.element) && !IsEnum(type.VectorType())) ||
IsStruct(type.VectorType()))) {
return Error("only vectors of scalars are allowed to be 64-bit.");
}

// Lastly, check if it is supported by the specified generated languages. Do
// this last so the above checks can inform the user of schema errors to fix
// first.
if (!Supports64BitOffsets()) {
return Error(
"fields using 64-bit offsets are not yet supported in at least one "
"of the specified programming languages.");
}
}
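In practice the two attributes accepted above map onto the new offset and vector types roughly as follows (a sketch of the shape of the generated C++, not verbatim flatc output; the field names are made up for illustration):

// schema: near:[uint8];              -> 32-bit offset to a 32-bit vector
// schema: far:[uint8] (offset64);    -> 64-bit offset to a 32-bit vector
// schema: big:[uint8] (vector64);    -> 64-bit offset to a 64-bit vector
::flatbuffers::Offset<::flatbuffers::Vector<uint8_t>> near;
::flatbuffers::Offset64<::flatbuffers::Vector<uint8_t>> far;
::flatbuffers::Offset64<::flatbuffers::Vector64<uint8_t>> big;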
// For historical convenience reasons, string keys are assumed required.
|
||||
// Scalars are kDefault unless otherwise specified.
|
||||
// Nonscalars are kOptional unless required;
|
||||
@ -1058,7 +1129,8 @@ CheckedError Parser::ParseField(StructDef &struct_def) {
|
||||
if (field->key) {
|
||||
if (struct_def.has_key) return Error("only one field may be set as 'key'");
|
||||
struct_def.has_key = true;
|
||||
auto is_valid = IsScalar(type.base_type) || IsString(type) || IsStruct(type);
|
||||
auto is_valid =
|
||||
IsScalar(type.base_type) || IsString(type) || IsStruct(type);
|
||||
if (IsArray(type)) {
|
||||
is_valid |=
|
||||
IsScalar(type.VectorType().base_type) || IsStruct(type.VectorType());
|
||||
@ -1161,7 +1233,7 @@ CheckedError Parser::ParseField(StructDef &struct_def) {
|
||||
if (nested->type.base_type != BASE_TYPE_STRING)
|
||||
return Error(
|
||||
"nested_flatbuffer attribute must be a string (the root type)");
|
||||
if (type.base_type != BASE_TYPE_VECTOR || type.element != BASE_TYPE_UCHAR)
|
||||
if (!IsVector(type.base_type) || type.element != BASE_TYPE_UCHAR)
|
||||
return Error(
|
||||
"nested_flatbuffer attribute may only apply to a vector of ubyte");
|
||||
// This will cause an error if the root type of the nested flatbuffer
|
||||
@ -1230,7 +1302,7 @@ CheckedError Parser::ParseComma() {
|
||||
CheckedError Parser::ParseAnyValue(Value &val, FieldDef *field,
|
||||
size_t parent_fieldn,
|
||||
const StructDef *parent_struct_def,
|
||||
uoffset_t count, bool inside_vector) {
|
||||
size_t count, bool inside_vector) {
|
||||
switch (val.type.base_type) {
|
||||
case BASE_TYPE_UNION: {
|
||||
FLATBUFFERS_ASSERT(field);
|
||||
@ -1300,7 +1372,7 @@ CheckedError Parser::ParseAnyValue(Value &val, FieldDef *field,
|
||||
return Error(
|
||||
"union types vector smaller than union values vector for: " +
|
||||
field->name);
|
||||
enum_idx = vector_of_union_types->Get(count);
|
||||
enum_idx = vector_of_union_types->Get(static_cast<uoffset_t>(count));
|
||||
} else {
|
||||
ECHECK(atot(constant.c_str(), *this, &enum_idx));
|
||||
}
|
||||
@ -1329,9 +1401,10 @@ CheckedError Parser::ParseAnyValue(Value &val, FieldDef *field,
|
||||
ECHECK(ParseString(val, field->shared));
|
||||
break;
|
||||
}
|
||||
case BASE_TYPE_VECTOR64:
|
||||
case BASE_TYPE_VECTOR: {
|
||||
uoffset_t off;
|
||||
ECHECK(ParseVector(val.type.VectorType(), &off, field, parent_fieldn));
|
||||
ECHECK(ParseVector(val.type, &off, field, parent_fieldn));
|
||||
val.constant = NumToString(off);
|
||||
break;
|
||||
}
|
||||
@ -1503,6 +1576,9 @@ CheckedError Parser::ParseTable(const StructDef &struct_def, std::string *value,
|
||||
for (size_t size = struct_def.sortbysize ? sizeof(largest_scalar_t) : 1; size;
|
||||
size /= 2) {
|
||||
// Go through elements in reverse, since we're building the data backwards.
|
||||
// TODO(derekbailey): this doesn't work when there are Offset64 fields, as
|
||||
// those have to be built first. So this needs to be changed to iterate over
|
||||
// Offset64 then Offset32 fields.
|
||||
for (auto it = field_stack_.rbegin();
|
||||
it != field_stack_.rbegin() + fieldn_outer; ++it) {
|
||||
auto &field_value = it->first;
|
||||
@ -1510,7 +1586,7 @@ CheckedError Parser::ParseTable(const StructDef &struct_def, std::string *value,
|
||||
if (!struct_def.sortbysize ||
|
||||
size == SizeOf(field_value.type.base_type)) {
|
||||
switch (field_value.type.base_type) {
|
||||
// clang-format off
|
||||
// clang-format off
|
||||
#define FLATBUFFERS_TD(ENUM, IDLTYPE, CTYPE, ...) \
|
||||
case BASE_TYPE_ ## ENUM: \
|
||||
builder_.Pad(field->padding); \
|
||||
@ -1541,9 +1617,16 @@ CheckedError Parser::ParseTable(const StructDef &struct_def, std::string *value,
|
||||
if (IsStruct(field->value.type)) { \
|
||||
SerializeStruct(*field->value.type.struct_def, field_value); \
|
||||
} else { \
|
||||
CTYPE val; \
|
||||
ECHECK(atot(field_value.constant.c_str(), *this, &val)); \
|
||||
builder_.AddOffset(field_value.offset, val); \
|
||||
/* Special case for fields that use 64-bit addressing */ \
|
||||
if(field->offset64) { \
|
||||
Offset64<void> offset; \
|
||||
ECHECK(atot(field_value.constant.c_str(), *this, &offset)); \
|
||||
builder_.AddOffset(field_value.offset, offset); \
|
||||
} else { \
|
||||
CTYPE val; \
|
||||
ECHECK(atot(field_value.constant.c_str(), *this, &val)); \
|
||||
builder_.AddOffset(field_value.offset, val); \
|
||||
} \
|
||||
} \
|
||||
break;
|
||||
FLATBUFFERS_GEN_TYPES_POINTER(FLATBUFFERS_TD)
|
||||
@ -1581,7 +1664,7 @@ CheckedError Parser::ParseTable(const StructDef &struct_def, std::string *value,
|
||||
}
|
||||
|
||||
template<typename F>
|
||||
CheckedError Parser::ParseVectorDelimiters(uoffset_t &count, F body) {
|
||||
CheckedError Parser::ParseVectorDelimiters(size_t &count, F body) {
|
||||
EXPECT('[');
|
||||
for (;;) {
|
||||
if ((!opts.strict_json || !count) && Is(']')) break;
|
||||
@ -1611,10 +1694,11 @@ CheckedError Parser::ParseAlignAttribute(const std::string &align_constant,
|
||||
NumToString(FLATBUFFERS_MAX_ALIGNMENT));
|
||||
}
|
||||
|
||||
CheckedError Parser::ParseVector(const Type &type, uoffset_t *ovalue,
|
||||
CheckedError Parser::ParseVector(const Type &vector_type, uoffset_t *ovalue,
|
||||
FieldDef *field, size_t fieldn) {
|
||||
uoffset_t count = 0;
|
||||
auto err = ParseVectorDelimiters(count, [&](uoffset_t &) -> CheckedError {
|
||||
Type type = vector_type.VectorType();
|
||||
size_t count = 0;
|
||||
auto err = ParseVectorDelimiters(count, [&](size_t &) -> CheckedError {
|
||||
Value val;
|
||||
val.type = type;
|
||||
ECHECK(ParseAnyValue(val, field, fieldn, nullptr, count, true));
|
||||
@ -1634,12 +1718,18 @@ CheckedError Parser::ParseVector(const Type &type, uoffset_t *ovalue,
|
||||
}
|
||||
|
||||
// TODO Fix using element alignment as size (`elemsize`)!
|
||||
builder_.StartVector(len, elemsize, alignment);
|
||||
for (uoffset_t i = 0; i < count; i++) {
|
||||
if (vector_type.base_type == BASE_TYPE_VECTOR64) {
|
||||
// TODO(derekbailey): this requires a 64-bit builder.
|
||||
// builder_.StartVector<Offset64, uoffset64_t>(len, elemsize, alignment);
|
||||
builder_.StartVector(len, elemsize, alignment);
|
||||
} else {
|
||||
builder_.StartVector(len, elemsize, alignment);
|
||||
}
|
||||
for (size_t i = 0; i < count; i++) {
|
||||
// start at the back, since we're building the data backwards.
|
||||
auto &val = field_stack_.back().first;
|
||||
switch (val.type.base_type) {
|
||||
// clang-format off
|
||||
// clang-format off
|
||||
#define FLATBUFFERS_TD(ENUM, IDLTYPE, CTYPE,...) \
|
||||
case BASE_TYPE_ ## ENUM: \
|
||||
if (IsStruct(val.type)) SerializeStruct(*val.type.struct_def, val); \
|
||||
@ -1657,7 +1747,11 @@ CheckedError Parser::ParseVector(const Type &type, uoffset_t *ovalue,
|
||||
}
|
||||
|
||||
builder_.ClearOffsets();
|
||||
*ovalue = builder_.EndVector(count);
|
||||
if (vector_type.base_type == BASE_TYPE_VECTOR64) {
|
||||
*ovalue = builder_.EndVector<uoffset64_t>(count);
|
||||
} else {
|
||||
*ovalue = builder_.EndVector(count);
|
||||
}
|
||||
|
||||
if (type.base_type == BASE_TYPE_STRUCT && type.struct_def->has_key) {
|
||||
// We should sort this vector. Find the key first.
|
||||
@ -1725,8 +1819,8 @@ CheckedError Parser::ParseArray(Value &array) {
|
||||
FlatBufferBuilder builder;
|
||||
const auto &type = array.type.VectorType();
|
||||
auto length = array.type.fixed_length;
|
||||
uoffset_t count = 0;
|
||||
auto err = ParseVectorDelimiters(count, [&](uoffset_t &) -> CheckedError {
|
||||
size_t count = 0;
|
||||
auto err = ParseVectorDelimiters(count, [&](size_t &) -> CheckedError {
|
||||
stack.emplace_back(Value());
|
||||
auto &val = stack.back();
|
||||
val.type = type;
|
||||
@ -1977,8 +2071,7 @@ CheckedError Parser::TryTypedValue(const std::string *name, int dtoken,
|
||||
e.type.base_type = req;
|
||||
} else {
|
||||
return Error(std::string("type mismatch: expecting: ") +
|
||||
TypeName(e.type.base_type) +
|
||||
", found: " + TypeName(req) +
|
||||
TypeName(e.type.base_type) + ", found: " + TypeName(req) +
|
||||
", name: " + (name ? *name : "") + ", value: " + e.constant);
|
||||
}
|
||||
}
|
||||
@ -2595,6 +2688,11 @@ bool Parser::SupportsAdvancedArrayFeatures() const {
|
||||
IDLOptions::kBinary | IDLOptions::kRust | IDLOptions::kTs)) == 0;
|
||||
}
|
||||
|
||||
bool Parser::Supports64BitOffsets() const {
|
||||
return (opts.lang_to_generate &
|
||||
~(IDLOptions::kCpp | IDLOptions::kJson | IDLOptions::kBinary)) == 0;
|
||||
}
|
||||
|
||||
Namespace *Parser::UniqueNamespace(Namespace *ns) {
|
||||
for (auto it = namespaces_.begin(); it != namespaces_.end(); ++it) {
|
||||
if (ns->components == (*it)->components) {
|
||||
@ -3217,10 +3315,9 @@ CheckedError Parser::SkipAnyJsonValue() {
});
}
case '[': {
uoffset_t count = 0;
return ParseVectorDelimiters(count, [&](uoffset_t &) -> CheckedError {
return SkipAnyJsonValue();
});
size_t count = 0;
return ParseVectorDelimiters(
count, [&](size_t &) -> CheckedError { return SkipAnyJsonValue(); });
}
case kTokenStringConstant:
case kTokenIntegerConstant:
@ -3269,8 +3366,8 @@ CheckedError Parser::ParseFlexBufferValue(flexbuffers::Builder *builder) {
}
case '[': {
auto start = builder->StartVector();
uoffset_t count = 0;
ECHECK(ParseVectorDelimiters(count, [&](uoffset_t &) -> CheckedError {
size_t count = 0;
ECHECK(ParseVectorDelimiters(count, [&](size_t &) -> CheckedError {
return ParseFlexBufferValue(builder);
}));
builder->EndVector(start, false, false);
@ -3922,7 +4019,7 @@ Offset<reflection::Field> FieldDef::Serialize(FlatBufferBuilder *builder,
IsInteger(value.type.base_type) ? StringToInt(value.constant.c_str()) : 0,
// result may be platform-dependent if underlying is float (not double)
IsFloat(value.type.base_type) ? d : 0.0, deprecated, IsRequired(), key,
attr__, docs__, IsOptional(), static_cast<uint16_t>(padding));
attr__, docs__, IsOptional(), static_cast<uint16_t>(padding), offset64);
// TODO: value.constant is almost always "0", we could save quite a bit of
// space by sharing it. Same for common values of value.type.
}
@ -3940,6 +4037,7 @@ bool FieldDef::Deserialize(Parser &parser, const reflection::Field *field) {
presence = FieldDef::MakeFieldPresence(field->optional(), field->required());
padding = field->padding();
key = field->key();
offset64 = field->offset64();
if (!DeserializeAttributes(parser, field->attributes())) return false;
// TODO: this should probably be handled by a separate attribute
if (attributes.Lookup("flexbuffer")) {
@ -4264,12 +4362,18 @@ std::string Parser::ConformTo(const Parser &base) {
auto field_base = struct_def_base->fields.Lookup(field.name);
const auto qualified_field_name = qualified_name + "." + field.name;
if (field_base) {
if (field.value.offset != field_base->value.offset)
if (field.value.offset != field_base->value.offset) {
return "offsets differ for field: " + qualified_field_name;
if (field.value.constant != field_base->value.constant)
}
if (field.value.constant != field_base->value.constant) {
return "defaults differ for field: " + qualified_field_name;
if (!EqualByName(field.value.type, field_base->value.type))
}
if (!EqualByName(field.value.type, field_base->value.type)) {
return "types differ for field: " + qualified_field_name;
}
if (field.offset64 != field_base->offset64) {
return "offset types differ for field: " + qualified_field_name;
}
} else {
// Doesn't have to exist, deleting fields is fine.
// But we should check if there is a field that has the same offset

8  tests/64bit/evolution/v1.fbs  Normal file
@ -0,0 +1,8 @@
namespace v1;

table RootTable {
a:float;
b:[uint8];
}

root_type RootTable;
219
tests/64bit/evolution/v1_generated.h
Normal file
219
tests/64bit/evolution/v1_generated.h
Normal file
@ -0,0 +1,219 @@
|
||||
// automatically generated by the FlatBuffers compiler, do not modify
|
||||
|
||||
|
||||
#ifndef FLATBUFFERS_GENERATED_V1_V1_H_
|
||||
#define FLATBUFFERS_GENERATED_V1_V1_H_
|
||||
|
||||
#include "flatbuffers/flatbuffers.h"
|
||||
|
||||
// Ensure the included flatbuffers.h is the same version as when this file was
|
||||
// generated, otherwise it may not be compatible.
|
||||
static_assert(FLATBUFFERS_VERSION_MAJOR == 23 &&
|
||||
FLATBUFFERS_VERSION_MINOR == 5 &&
|
||||
FLATBUFFERS_VERSION_REVISION == 8,
|
||||
"Non-compatible flatbuffers version included");
|
||||
|
||||
namespace v1 {
|
||||
|
||||
struct RootTable;
|
||||
struct RootTableBuilder;
|
||||
struct RootTableT;
|
||||
|
||||
bool operator==(const RootTableT &lhs, const RootTableT &rhs);
|
||||
bool operator!=(const RootTableT &lhs, const RootTableT &rhs);
|
||||
|
||||
inline const ::flatbuffers::TypeTable *RootTableTypeTable();
|
||||
|
||||
struct RootTableT : public ::flatbuffers::NativeTable {
|
||||
typedef RootTable TableType;
|
||||
float a = 0.0f;
|
||||
std::vector<uint8_t> b{};
|
||||
};
|
||||
|
||||
struct RootTable FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
|
||||
typedef RootTableT NativeTableType;
|
||||
typedef RootTableBuilder Builder;
|
||||
static const ::flatbuffers::TypeTable *MiniReflectTypeTable() {
|
||||
return RootTableTypeTable();
|
||||
}
|
||||
enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
|
||||
VT_A = 4,
|
||||
VT_B = 6
|
||||
};
|
||||
float a() const {
|
||||
return GetField<float>(VT_A, 0.0f);
|
||||
}
|
||||
bool mutate_a(float _a = 0.0f) {
|
||||
return SetField<float>(VT_A, _a, 0.0f);
|
||||
}
|
||||
const ::flatbuffers::Vector<uint8_t> *b() const {
|
||||
return GetPointer<const ::flatbuffers::Vector<uint8_t> *>(VT_B);
|
||||
}
|
||||
::flatbuffers::Vector<uint8_t> *mutable_b() {
|
||||
return GetPointer<::flatbuffers::Vector<uint8_t> *>(VT_B);
|
||||
}
|
||||
bool Verify(::flatbuffers::Verifier &verifier) const {
|
||||
return VerifyTableStart(verifier) &&
|
||||
VerifyField<float>(verifier, VT_A, 4) &&
|
||||
VerifyOffset(verifier, VT_B) &&
|
||||
verifier.VerifyVector(b()) &&
|
||||
verifier.EndTable();
|
||||
}
|
||||
RootTableT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
|
||||
void UnPackTo(RootTableT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
|
||||
static ::flatbuffers::Offset<RootTable> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const RootTableT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
|
||||
};
|
||||
|
||||
struct RootTableBuilder {
|
||||
typedef RootTable Table;
|
||||
::flatbuffers::FlatBufferBuilder &fbb_;
|
||||
::flatbuffers::uoffset_t start_;
|
||||
void add_a(float a) {
|
||||
fbb_.AddElement<float>(RootTable::VT_A, a, 0.0f);
|
||||
}
|
||||
void add_b(::flatbuffers::Offset<::flatbuffers::Vector<uint8_t>> b) {
|
||||
fbb_.AddOffset(RootTable::VT_B, b);
|
||||
}
|
||||
explicit RootTableBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
|
||||
: fbb_(_fbb) {
|
||||
start_ = fbb_.StartTable();
|
||||
}
|
||||
::flatbuffers::Offset<RootTable> Finish() {
|
||||
const auto end = fbb_.EndTable(start_);
|
||||
auto o = ::flatbuffers::Offset<RootTable>(end);
|
||||
return o;
|
||||
}
|
||||
};
|
||||
|
||||
inline ::flatbuffers::Offset<RootTable> CreateRootTable(
|
||||
::flatbuffers::FlatBufferBuilder &_fbb,
|
||||
float a = 0.0f,
|
||||
::flatbuffers::Offset<::flatbuffers::Vector<uint8_t>> b = 0) {
|
||||
RootTableBuilder builder_(_fbb);
|
||||
builder_.add_b(b);
|
||||
builder_.add_a(a);
|
||||
return builder_.Finish();
|
||||
}
|
||||
|
||||
inline ::flatbuffers::Offset<RootTable> CreateRootTableDirect(
|
||||
::flatbuffers::FlatBufferBuilder &_fbb,
|
||||
float a = 0.0f,
|
||||
const std::vector<uint8_t> *b = nullptr) {
|
||||
auto b__ = b ? _fbb.CreateVector<uint8_t>(*b) : 0;
|
||||
return v1::CreateRootTable(
|
||||
_fbb,
|
||||
a,
|
||||
b__);
|
||||
}
|
||||
|
||||
::flatbuffers::Offset<RootTable> CreateRootTable(::flatbuffers::FlatBufferBuilder &_fbb, const RootTableT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
|
||||
|
||||
|
||||
inline bool operator==(const RootTableT &lhs, const RootTableT &rhs) {
|
||||
return
|
||||
(lhs.a == rhs.a) &&
|
||||
(lhs.b == rhs.b);
|
||||
}
|
||||
|
||||
inline bool operator!=(const RootTableT &lhs, const RootTableT &rhs) {
|
||||
return !(lhs == rhs);
|
||||
}
|
||||
|
||||
|
||||
inline RootTableT *RootTable::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
|
||||
auto _o = std::unique_ptr<RootTableT>(new RootTableT());
|
||||
UnPackTo(_o.get(), _resolver);
|
||||
return _o.release();
|
||||
}
|
||||
|
||||
inline void RootTable::UnPackTo(RootTableT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
|
||||
(void)_o;
|
||||
(void)_resolver;
|
||||
{ auto _e = a(); _o->a = _e; }
|
||||
{ auto _e = b(); if (_e) { _o->b.resize(_e->size()); std::copy(_e->begin(), _e->end(), _o->b.begin()); } }
|
||||
}
|
||||
|
||||
inline ::flatbuffers::Offset<RootTable> RootTable::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const RootTableT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
|
||||
return CreateRootTable(_fbb, _o, _rehasher);
|
||||
}
|
||||
|
||||
inline ::flatbuffers::Offset<RootTable> CreateRootTable(::flatbuffers::FlatBufferBuilder &_fbb, const RootTableT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
|
||||
(void)_rehasher;
|
||||
(void)_o;
|
||||
struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const RootTableT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
|
||||
auto _a = _o->a;
|
||||
auto _b = _o->b.size() ? _fbb.CreateVector(_o->b) : 0;
|
||||
return v1::CreateRootTable(
|
||||
_fbb,
|
||||
_a,
|
||||
_b);
|
||||
}
|
||||
|
||||
inline const ::flatbuffers::TypeTable *RootTableTypeTable() {
|
||||
static const ::flatbuffers::TypeCode type_codes[] = {
|
||||
{ ::flatbuffers::ET_FLOAT, 0, -1 },
|
||||
{ ::flatbuffers::ET_UCHAR, 1, -1 }
|
||||
};
|
||||
static const char * const names[] = {
|
||||
"a",
|
||||
"b"
|
||||
};
|
||||
static const ::flatbuffers::TypeTable tt = {
|
||||
::flatbuffers::ST_TABLE, 2, type_codes, nullptr, nullptr, nullptr, names
|
||||
};
|
||||
return &tt;
|
||||
}
|
||||
|
||||
inline const v1::RootTable *GetRootTable(const void *buf) {
|
||||
return ::flatbuffers::GetRoot<v1::RootTable>(buf);
|
||||
}
|
||||
|
||||
inline const v1::RootTable *GetSizePrefixedRootTable(const void *buf) {
|
||||
return ::flatbuffers::GetSizePrefixedRoot<v1::RootTable>(buf);
|
||||
}
|
||||
|
||||
inline RootTable *GetMutableRootTable(void *buf) {
|
||||
return ::flatbuffers::GetMutableRoot<RootTable>(buf);
|
||||
}
|
||||
|
||||
inline v1::RootTable *GetMutableSizePrefixedRootTable(void *buf) {
|
||||
return ::flatbuffers::GetMutableSizePrefixedRoot<v1::RootTable>(buf);
|
||||
}
|
||||
|
||||
inline bool VerifyRootTableBuffer(
|
||||
::flatbuffers::Verifier &verifier) {
|
||||
return verifier.VerifyBuffer<v1::RootTable>(nullptr);
|
||||
}
|
||||
|
||||
inline bool VerifySizePrefixedRootTableBuffer(
|
||||
::flatbuffers::Verifier &verifier) {
|
||||
return verifier.VerifySizePrefixedBuffer<v1::RootTable>(nullptr);
|
||||
}
|
||||
|
||||
inline void FinishRootTableBuffer(
|
||||
::flatbuffers::FlatBufferBuilder &fbb,
|
||||
::flatbuffers::Offset<v1::RootTable> root) {
|
||||
fbb.Finish(root);
|
||||
}
|
||||
|
||||
inline void FinishSizePrefixedRootTableBuffer(
|
||||
::flatbuffers::FlatBufferBuilder &fbb,
|
||||
::flatbuffers::Offset<v1::RootTable> root) {
|
||||
fbb.FinishSizePrefixed(root);
|
||||
}
|
||||
|
||||
inline std::unique_ptr<v1::RootTableT> UnPackRootTable(
|
||||
const void *buf,
|
||||
const ::flatbuffers::resolver_function_t *res = nullptr) {
|
||||
return std::unique_ptr<v1::RootTableT>(GetRootTable(buf)->UnPack(res));
|
||||
}
|
||||
|
||||
inline std::unique_ptr<v1::RootTableT> UnPackSizePrefixedRootTable(
|
||||
const void *buf,
|
||||
const ::flatbuffers::resolver_function_t *res = nullptr) {
|
||||
return std::unique_ptr<v1::RootTableT>(GetSizePrefixedRootTable(buf)->UnPack(res));
|
||||
}
|
||||
|
||||
} // namespace v1
|
||||
|
||||
#endif // FLATBUFFERS_GENERATED_V1_V1_H_
9
tests/64bit/evolution/v2.fbs
Normal file
@@ -0,0 +1,9 @@
namespace v2;

table RootTable {
  a:float;
  b:[uint8];
  big_vector:[uint8] (vector64);
}

root_type RootTable;
243
tests/64bit/evolution/v2_generated.h
Normal file
@@ -0,0 +1,243 @@
// automatically generated by the FlatBuffers compiler, do not modify
|
||||
|
||||
|
||||
#ifndef FLATBUFFERS_GENERATED_V2_V2_H_
|
||||
#define FLATBUFFERS_GENERATED_V2_V2_H_
|
||||
|
||||
#include "flatbuffers/flatbuffers.h"
|
||||
|
||||
// Ensure the included flatbuffers.h is the same version as when this file was
|
||||
// generated, otherwise it may not be compatible.
|
||||
static_assert(FLATBUFFERS_VERSION_MAJOR == 23 &&
|
||||
FLATBUFFERS_VERSION_MINOR == 5 &&
|
||||
FLATBUFFERS_VERSION_REVISION == 8,
|
||||
"Non-compatible flatbuffers version included");
|
||||
|
||||
namespace v2 {
|
||||
|
||||
struct RootTable;
|
||||
struct RootTableBuilder;
|
||||
struct RootTableT;
|
||||
|
||||
bool operator==(const RootTableT &lhs, const RootTableT &rhs);
|
||||
bool operator!=(const RootTableT &lhs, const RootTableT &rhs);
|
||||
|
||||
inline const ::flatbuffers::TypeTable *RootTableTypeTable();
|
||||
|
||||
struct RootTableT : public ::flatbuffers::NativeTable {
|
||||
typedef RootTable TableType;
|
||||
float a = 0.0f;
|
||||
std::vector<uint8_t> b{};
|
||||
std::vector<uint8_t> big_vector{};
|
||||
};
|
||||
|
||||
struct RootTable FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
|
||||
typedef RootTableT NativeTableType;
|
||||
typedef RootTableBuilder Builder;
|
||||
static const ::flatbuffers::TypeTable *MiniReflectTypeTable() {
|
||||
return RootTableTypeTable();
|
||||
}
|
||||
enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
|
||||
VT_A = 4,
|
||||
VT_B = 6,
|
||||
VT_BIG_VECTOR = 8
|
||||
};
|
||||
float a() const {
|
||||
return GetField<float>(VT_A, 0.0f);
|
||||
}
|
||||
bool mutate_a(float _a = 0.0f) {
|
||||
return SetField<float>(VT_A, _a, 0.0f);
|
||||
}
|
||||
const ::flatbuffers::Vector<uint8_t> *b() const {
|
||||
return GetPointer<const ::flatbuffers::Vector<uint8_t> *>(VT_B);
|
||||
}
|
||||
::flatbuffers::Vector<uint8_t> *mutable_b() {
|
||||
return GetPointer<::flatbuffers::Vector<uint8_t> *>(VT_B);
|
||||
}
|
||||
const ::flatbuffers::Vector64<uint8_t> *big_vector() const {
|
||||
return GetPointer64<const ::flatbuffers::Vector64<uint8_t> *>(VT_BIG_VECTOR);
|
||||
}
|
||||
::flatbuffers::Vector64<uint8_t> *mutable_big_vector() {
|
||||
return GetPointer64<::flatbuffers::Vector64<uint8_t> *>(VT_BIG_VECTOR);
|
||||
}
|
||||
bool Verify(::flatbuffers::Verifier &verifier) const {
|
||||
return VerifyTableStart(verifier) &&
|
||||
VerifyField<float>(verifier, VT_A, 4) &&
|
||||
VerifyOffset(verifier, VT_B) &&
|
||||
verifier.VerifyVector(b()) &&
|
||||
VerifyOffset64(verifier, VT_BIG_VECTOR) &&
|
||||
verifier.VerifyVector(big_vector()) &&
|
||||
verifier.EndTable();
|
||||
}
|
||||
RootTableT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
|
||||
void UnPackTo(RootTableT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
|
||||
static ::flatbuffers::Offset<RootTable> Pack(::flatbuffers::FlatBufferBuilder64 &_fbb, const RootTableT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
|
||||
};
|
||||
|
||||
struct RootTableBuilder {
|
||||
typedef RootTable Table;
|
||||
::flatbuffers::FlatBufferBuilder64 &fbb_;
|
||||
::flatbuffers::uoffset_t start_;
|
||||
void add_a(float a) {
|
||||
fbb_.AddElement<float>(RootTable::VT_A, a, 0.0f);
|
||||
}
|
||||
void add_b(::flatbuffers::Offset<::flatbuffers::Vector<uint8_t>> b) {
|
||||
fbb_.AddOffset(RootTable::VT_B, b);
|
||||
}
|
||||
void add_big_vector(::flatbuffers::Offset64<::flatbuffers::Vector64<uint8_t>> big_vector) {
|
||||
fbb_.AddOffset(RootTable::VT_BIG_VECTOR, big_vector);
|
||||
}
|
||||
explicit RootTableBuilder(::flatbuffers::FlatBufferBuilder64 &_fbb)
|
||||
: fbb_(_fbb) {
|
||||
start_ = fbb_.StartTable();
|
||||
}
|
||||
::flatbuffers::Offset<RootTable> Finish() {
|
||||
const auto end = fbb_.EndTable(start_);
|
||||
auto o = ::flatbuffers::Offset<RootTable>(end);
|
||||
return o;
|
||||
}
|
||||
};
|
||||
|
||||
inline ::flatbuffers::Offset<RootTable> CreateRootTable(
|
||||
::flatbuffers::FlatBufferBuilder64 &_fbb,
|
||||
float a = 0.0f,
|
||||
::flatbuffers::Offset<::flatbuffers::Vector<uint8_t>> b = 0,
|
||||
::flatbuffers::Offset64<::flatbuffers::Vector64<uint8_t>> big_vector = 0) {
|
||||
RootTableBuilder builder_(_fbb);
|
||||
builder_.add_big_vector(big_vector);
|
||||
builder_.add_b(b);
|
||||
builder_.add_a(a);
|
||||
return builder_.Finish();
|
||||
}
|
||||
|
||||
inline ::flatbuffers::Offset<RootTable> CreateRootTableDirect(
|
||||
::flatbuffers::FlatBufferBuilder64 &_fbb,
|
||||
float a = 0.0f,
|
||||
const std::vector<uint8_t> *b = nullptr,
|
||||
const std::vector<uint8_t> *big_vector = nullptr) {
|
||||
auto big_vector__ = big_vector ? _fbb.CreateVector64(*big_vector) : 0;
|
||||
auto b__ = b ? _fbb.CreateVector<uint8_t>(*b) : 0;
|
||||
return v2::CreateRootTable(
|
||||
_fbb,
|
||||
a,
|
||||
b__,
|
||||
big_vector__);
|
||||
}
|
||||
|
||||
::flatbuffers::Offset<RootTable> CreateRootTable(::flatbuffers::FlatBufferBuilder64 &_fbb, const RootTableT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
|
||||
|
||||
|
||||
inline bool operator==(const RootTableT &lhs, const RootTableT &rhs) {
|
||||
return
|
||||
(lhs.a == rhs.a) &&
|
||||
(lhs.b == rhs.b) &&
|
||||
(lhs.big_vector == rhs.big_vector);
|
||||
}
|
||||
|
||||
inline bool operator!=(const RootTableT &lhs, const RootTableT &rhs) {
|
||||
return !(lhs == rhs);
|
||||
}
|
||||
|
||||
|
||||
inline RootTableT *RootTable::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
|
||||
auto _o = std::unique_ptr<RootTableT>(new RootTableT());
|
||||
UnPackTo(_o.get(), _resolver);
|
||||
return _o.release();
|
||||
}
|
||||
|
||||
inline void RootTable::UnPackTo(RootTableT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
|
||||
(void)_o;
|
||||
(void)_resolver;
|
||||
{ auto _e = a(); _o->a = _e; }
|
||||
{ auto _e = b(); if (_e) { _o->b.resize(_e->size()); std::copy(_e->begin(), _e->end(), _o->b.begin()); } }
|
||||
{ auto _e = big_vector(); if (_e) { _o->big_vector.resize(_e->size()); std::copy(_e->begin(), _e->end(), _o->big_vector.begin()); } }
|
||||
}
|
||||
|
||||
inline ::flatbuffers::Offset<RootTable> RootTable::Pack(::flatbuffers::FlatBufferBuilder64 &_fbb, const RootTableT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
|
||||
return CreateRootTable(_fbb, _o, _rehasher);
|
||||
}
|
||||
|
||||
inline ::flatbuffers::Offset<RootTable> CreateRootTable(::flatbuffers::FlatBufferBuilder64 &_fbb, const RootTableT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
|
||||
(void)_rehasher;
|
||||
(void)_o;
|
||||
struct _VectorArgs { ::flatbuffers::FlatBufferBuilder64 *__fbb; const RootTableT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
|
||||
auto _a = _o->a;
|
||||
auto _b = _o->b.size() ? _fbb.CreateVector(_o->b) : 0;
|
||||
auto _big_vector = _o->big_vector.size() ? _fbb.CreateVector64(_o->big_vector) : 0;
|
||||
return v2::CreateRootTable(
|
||||
_fbb,
|
||||
_a,
|
||||
_b,
|
||||
_big_vector);
|
||||
}
|
||||
|
||||
inline const ::flatbuffers::TypeTable *RootTableTypeTable() {
|
||||
static const ::flatbuffers::TypeCode type_codes[] = {
|
||||
{ ::flatbuffers::ET_FLOAT, 0, -1 },
|
||||
{ ::flatbuffers::ET_UCHAR, 1, -1 },
|
||||
{ ::flatbuffers::ET_UCHAR, 1, -1 }
|
||||
};
|
||||
static const char * const names[] = {
|
||||
"a",
|
||||
"b",
|
||||
"big_vector"
|
||||
};
|
||||
static const ::flatbuffers::TypeTable tt = {
|
||||
::flatbuffers::ST_TABLE, 3, type_codes, nullptr, nullptr, nullptr, names
|
||||
};
|
||||
return &tt;
|
||||
}
|
||||
|
||||
inline const v2::RootTable *GetRootTable(const void *buf) {
|
||||
return ::flatbuffers::GetRoot<v2::RootTable>(buf);
|
||||
}
|
||||
|
||||
inline const v2::RootTable *GetSizePrefixedRootTable(const void *buf) {
|
||||
return ::flatbuffers::GetSizePrefixedRoot<v2::RootTable,::flatbuffers::uoffset64_t>(buf);
|
||||
}
|
||||
|
||||
inline RootTable *GetMutableRootTable(void *buf) {
|
||||
return ::flatbuffers::GetMutableRoot<RootTable>(buf);
|
||||
}
|
||||
|
||||
inline v2::RootTable *GetMutableSizePrefixedRootTable(void *buf) {
|
||||
return ::flatbuffers::GetMutableSizePrefixedRoot<v2::RootTable,::flatbuffers::uoffset64_t>(buf);
|
||||
}
|
||||
|
||||
inline bool VerifyRootTableBuffer(
|
||||
::flatbuffers::Verifier &verifier) {
|
||||
return verifier.VerifyBuffer<v2::RootTable>(nullptr);
|
||||
}
|
||||
|
||||
inline bool VerifySizePrefixedRootTableBuffer(
|
||||
::flatbuffers::Verifier &verifier) {
|
||||
return verifier.VerifySizePrefixedBuffer<v2::RootTable,::flatbuffers::uoffset64_t>(nullptr);
|
||||
}
|
||||
|
||||
inline void FinishRootTableBuffer(
|
||||
::flatbuffers::FlatBufferBuilder64 &fbb,
|
||||
::flatbuffers::Offset<v2::RootTable> root) {
|
||||
fbb.Finish(root);
|
||||
}
|
||||
|
||||
inline void FinishSizePrefixedRootTableBuffer(
|
||||
::flatbuffers::FlatBufferBuilder64 &fbb,
|
||||
::flatbuffers::Offset<v2::RootTable> root) {
|
||||
fbb.FinishSizePrefixed(root);
|
||||
}
|
||||
|
||||
inline std::unique_ptr<v2::RootTableT> UnPackRootTable(
|
||||
const void *buf,
|
||||
const ::flatbuffers::resolver_function_t *res = nullptr) {
|
||||
return std::unique_ptr<v2::RootTableT>(GetRootTable(buf)->UnPack(res));
|
||||
}
|
||||
|
||||
inline std::unique_ptr<v2::RootTableT> UnPackSizePrefixedRootTable(
|
||||
const void *buf,
|
||||
const ::flatbuffers::resolver_function_t *res = nullptr) {
|
||||
return std::unique_ptr<v2::RootTableT>(GetSizePrefixedRootTable(buf)->UnPack(res));
|
||||
}
|
||||
|
||||
} // namespace v2
|
||||
|
||||
#endif // FLATBUFFERS_GENERATED_V2_V2_H_
447
tests/64bit/offset64_test.cpp
Normal file
@@ -0,0 +1,447 @@
#include "offset64_test.h"
|
||||
|
||||
#include <stdint.h>
|
||||
|
||||
#include <cstdint>
|
||||
#include <fstream>
|
||||
#include <limits>
|
||||
#include <ostream>
|
||||
|
||||
#include "evolution/v1_generated.h"
|
||||
#include "evolution/v2_generated.h"
|
||||
#include "flatbuffers/base.h"
|
||||
#include "flatbuffers/buffer.h"
|
||||
#include "flatbuffers/flatbuffer_builder.h"
|
||||
#include "flatbuffers/flatbuffers.h"
|
||||
#include "test_64bit_generated.h"
|
||||
#include "test_assert.h"
|
||||
|
||||
namespace flatbuffers {
|
||||
namespace tests {
|
||||
|
||||
void Offset64Test() {
|
||||
FlatBufferBuilder64 builder;
|
||||
|
||||
const size_t far_vector_size = 1LL << 2;
|
||||
const size_t big_vector_size = 1LL << 31;
|
||||
|
||||
{
|
||||
// First create the vectors that will be copied to the buffer.
|
||||
std::vector<uint8_t> far_data;
|
||||
far_data.resize(far_vector_size);
|
||||
far_data[0] = 4;
|
||||
far_data[far_vector_size - 1] = 2;
|
||||
|
||||
std::vector<uint8_t> big_data;
|
||||
big_data.resize(big_vector_size);
|
||||
big_data[0] = 8;
|
||||
big_data[big_vector_size - 1] = 3;
|
||||
|
||||
// Then serialize all the fields that have 64-bit offsets, as these must be
|
||||
// serialized before any 32-bit fields are added to the buffer.
|
||||
const Offset64<Vector<uint8_t>> far_vector_offset =
|
||||
builder.CreateVector64<Vector>(far_data);
|
||||
|
||||
const Offset64<String> far_string_offset =
|
||||
builder.CreateString<Offset64>("some far string");
|
||||
|
||||
const Offset64<Vector64<uint8_t>> big_vector_offset =
|
||||
builder.CreateVector64(big_data);
|
||||
|
||||
// Now that we are done with the 64-bit fields, we can create and add the
|
||||
// normal fields.
|
||||
const Offset<String> near_string_offset =
|
||||
builder.CreateString("some near string");
|
||||
|
||||
// Finish by building the root table by passing in all the offsets.
|
||||
const Offset<RootTable> root_table_offset =
|
||||
CreateRootTable(builder, far_vector_offset, 0, far_string_offset,
|
||||
big_vector_offset, near_string_offset);
|
||||
|
||||
// Finish the buffer.
|
||||
builder.Finish(root_table_offset);
|
||||
|
||||
// Ensure the buffer is big.
|
||||
TEST_ASSERT(builder.GetSize() > FLATBUFFERS_MAX_BUFFER_SIZE);
|
||||
|
||||
Verifier::Options options;
|
||||
// Allow the verifier to verify 64-bit buffers.
|
||||
options.max_size = FLATBUFFERS_MAX_64_BUFFER_SIZE;
|
||||
options.assert = true;
|
||||
|
||||
Verifier verifier(builder.GetBufferPointer(), builder.GetSize(), options);
|
||||
|
||||
TEST_EQ(VerifyRootTableBuffer(verifier), true);
|
||||
}
|
||||
|
||||
{
|
||||
const RootTable *root_table = GetRootTable(builder.GetBufferPointer());
|
||||
|
||||
// Expect the far vector to be properly sized.
|
||||
TEST_EQ(root_table->far_vector()->size(), far_vector_size);
|
||||
TEST_EQ(root_table->far_vector()->Get(0), 4);
|
||||
TEST_EQ(root_table->far_vector()->Get(far_vector_size - 1), 2);
|
||||
|
||||
TEST_EQ_STR(root_table->far_string()->c_str(), "some far string");
|
||||
|
||||
// Expect the big vector to be properly sized.
|
||||
TEST_EQ(root_table->big_vector()->size(), big_vector_size);
|
||||
TEST_EQ(root_table->big_vector()->Get(0), 8);
|
||||
TEST_EQ(root_table->big_vector()->Get(big_vector_size - 1), 3);
|
||||
|
||||
TEST_EQ_STR(root_table->near_string()->c_str(), "some near string");
|
||||
}
|
||||
}
|
||||
|
||||
void Offset64SerializedFirst() {
|
||||
FlatBufferBuilder64 fbb;
|
||||
|
||||
// First create the vectors that will be copied to the buffer.
|
||||
std::vector<uint8_t> data;
|
||||
data.resize(64);
|
||||
|
||||
// Then serialize all the fields that have 64-bit offsets, as these must be
|
||||
// serialized before any 32-bit fields are added to the buffer.
|
||||
fbb.CreateVector64(data);
|
||||
|
||||
// TODO(derekbailey): figure out how to test assertions.
|
||||
// Uncommenting this line should fail the test with an assertion.
|
||||
// fbb.CreateString("some near string");
|
||||
|
||||
fbb.CreateVector64(data);
|
||||
}
|
||||
|
||||
void Offset64NestedFlatBuffer() {
|
||||
FlatBufferBuilder64 fbb;
|
||||
|
||||
// First serialize a nested buffer.
|
||||
const Offset<String> near_string_offset =
|
||||
fbb.CreateString("nested: some near string");
|
||||
|
||||
// Finish by building the root table by passing in all the offsets.
|
||||
const Offset<RootTable> root_table_offset =
|
||||
CreateRootTable(fbb, 0, 0, 0, 0, near_string_offset, 0);
|
||||
|
||||
// Finish the buffer.
|
||||
fbb.Finish(root_table_offset);
|
||||
|
||||
// Ensure the buffer is valid.
|
||||
const RootTable *root_table = GetRootTable(fbb.GetBufferPointer());
|
||||
TEST_EQ_STR(root_table->near_string()->c_str(), "nested: some near string");
|
||||
|
||||
// Copy the data out of the builder.
|
||||
std::vector<uint8_t> nested_data{ fbb.GetBufferPointer(),
|
||||
fbb.GetBufferPointer() + fbb.GetSize() };
|
||||
|
||||
{
|
||||
// Clear so we can reuse the builder.
|
||||
fbb.Clear();
|
||||
|
||||
const Offset64<Vector64<uint8_t>> nested_flatbuffer_offset =
|
||||
fbb.CreateVector64<Vector64>(nested_data);
|
||||
|
||||
// Now that we are done with the 64-bit fields, we can create and add the
|
||||
// normal fields.
|
||||
const Offset<String> near_string_offset =
|
||||
fbb.CreateString("some near string");
|
||||
|
||||
// Finish by building the root table by passing in all the offsets.
|
||||
const Offset<RootTable> root_table_offset = CreateRootTable(
|
||||
fbb, 0, 0, 0, 0, near_string_offset, nested_flatbuffer_offset);
|
||||
|
||||
// Finish the buffer.
|
||||
fbb.Finish(root_table_offset);
|
||||
|
||||
Verifier::Options options;
|
||||
// Allow the verifier to verify 64-bit buffers.
|
||||
options.max_size = FLATBUFFERS_MAX_64_BUFFER_SIZE;
|
||||
options.assert = true;
|
||||
|
||||
Verifier verifier(fbb.GetBufferPointer(), fbb.GetSize(), options);
|
||||
|
||||
TEST_EQ(VerifyRootTableBuffer(verifier), true);
|
||||
}
|
||||
|
||||
{
|
||||
const RootTable *root_table = GetRootTable(fbb.GetBufferPointer());
|
||||
|
||||
// Test that the parent buffer field is ok.
|
||||
TEST_EQ_STR(root_table->near_string()->c_str(), "some near string");
|
||||
|
||||
// Expect nested buffer to be properly sized.
|
||||
TEST_EQ(root_table->nested_root()->size(), nested_data.size());
|
||||
|
||||
// Expect the direct accessors to the nested buffer work.
|
||||
TEST_EQ_STR(root_table->nested_root_nested_root()->near_string()->c_str(),
|
||||
"nested: some near string");
|
||||
}
|
||||
}
|
||||
|
||||
void Offset64CreateDirect() {
|
||||
FlatBufferBuilder64 fbb;
|
||||
|
||||
// Create a vector of some data
|
||||
std::vector<uint8_t> data{ 0, 1, 2 };
|
||||
|
||||
// Call the "Direct" creation method to ensure that things are added to the
|
||||
// buffer in the correct order, Offset64 first followed by any Offsets.
|
||||
const Offset<RootTable> root_table_offset = CreateRootTableDirect(
|
||||
fbb, &data, 0, "some far string", &data, "some near string");
|
||||
|
||||
// Finish the buffer.
|
||||
fbb.Finish(root_table_offset);
|
||||
|
||||
Verifier::Options options;
|
||||
// Allow the verifier to verify 64-bit buffers.
|
||||
options.max_size = FLATBUFFERS_MAX_64_BUFFER_SIZE;
|
||||
options.assert = true;
|
||||
|
||||
Verifier verifier(fbb.GetBufferPointer(), fbb.GetSize(), options);
|
||||
|
||||
TEST_EQ(VerifyRootTableBuffer(verifier), true);
|
||||
|
||||
// Verify the data.
|
||||
const RootTable *root_table = GetRootTable(fbb.GetBufferPointer());
|
||||
TEST_EQ(root_table->far_vector()->size(), data.size());
|
||||
TEST_EQ(root_table->big_vector()->size(), data.size());
|
||||
TEST_EQ_STR(root_table->far_string()->c_str(), "some far string");
|
||||
TEST_EQ_STR(root_table->near_string()->c_str(), "some near string");
|
||||
}
|
||||
|
||||
void Offset64Evolution() {
|
||||
// Some common data for the tests.
|
||||
const std::vector<uint8_t> data = { 1, 2, 3, 4 };
|
||||
const std::vector<uint8_t> big_data = { 6, 7, 8, 9, 10 };
|
||||
|
||||
// Built V1 read V2
|
||||
{
|
||||
// Use the 32-bit builder since V1 doesn't have any 64-bit offsets.
|
||||
FlatBufferBuilder builder;
|
||||
|
||||
builder.Finish(v1::CreateRootTableDirect(builder, 1234, &data));
|
||||
|
||||
// Use each version to get a view at the root table.
|
||||
auto v1_root = v1::GetRootTable(builder.GetBufferPointer());
|
||||
auto v2_root = v2::GetRootTable(builder.GetBufferPointer());
|
||||
|
||||
// Test field equivalents for fields common to V1 and V2.
|
||||
TEST_EQ(v1_root->a(), v2_root->a());
|
||||
|
||||
TEST_EQ(v1_root->b(), v2_root->b());
|
||||
TEST_EQ(v1_root->b()->Get(2), 3);
|
||||
TEST_EQ(v2_root->b()->Get(2), 3);
|
||||
|
||||
// This field is added in V2, so it should be null since V1 couldn't have
|
||||
// written it.
|
||||
TEST_ASSERT(v2_root->big_vector() == nullptr);
|
||||
}
|
||||
|
||||
// Built V2 read V1
|
||||
{
|
||||
// Use the 64-bit builder since V2 has 64-bit offsets.
|
||||
FlatBufferBuilder64 builder;
|
||||
|
||||
builder.Finish(v2::CreateRootTableDirect(builder, 1234, &data, &big_data));
|
||||
|
||||
// Use each version to get a view at the root table.
|
||||
auto v1_root = v1::GetRootTable(builder.GetBufferPointer());
|
||||
auto v2_root = v2::GetRootTable(builder.GetBufferPointer());
|
||||
|
||||
// Test field equivalents for fields common to V1 and V2.
|
||||
TEST_EQ(v1_root->a(), v2_root->a());
|
||||
|
||||
TEST_EQ(v1_root->b(), v2_root->b());
|
||||
TEST_EQ(v1_root->b()->Get(2), 3);
|
||||
TEST_EQ(v2_root->b()->Get(2), 3);
|
||||
|
||||
// Test that V2 can read the big vector, which V1 doesn't even have
|
||||
// accessors for (i.e. v1_root->big_vector() doesn't exist).
|
||||
TEST_ASSERT(v2_root->big_vector() != nullptr);
|
||||
TEST_EQ(v2_root->big_vector()->size(), big_data.size());
|
||||
TEST_EQ(v2_root->big_vector()->Get(2), 8);
|
||||
}
|
||||
|
||||
// Built V2 read V1, bigger than the max 32-bit buffer size.
|
||||
// This checks that even a large buffer can still be read by V1.
|
||||
{
|
||||
// Use the 64-bit builder since V2 has 64-bit offsets.
|
||||
FlatBufferBuilder64 builder;
|
||||
|
||||
std::vector<uint8_t> giant_data;
|
||||
giant_data.resize(1LL << 31);
|
||||
giant_data[2] = 42;
|
||||
|
||||
builder.Finish(
|
||||
v2::CreateRootTableDirect(builder, 1234, &data, &giant_data));
|
||||
|
||||
// Ensure the buffer is bigger than the 32-bit size limit for V1.
|
||||
TEST_ASSERT(builder.GetSize() > FLATBUFFERS_MAX_BUFFER_SIZE);
|
||||
|
||||
// Use each version to get a view at the root table.
|
||||
auto v1_root = v1::GetRootTable(builder.GetBufferPointer());
|
||||
auto v2_root = v2::GetRootTable(builder.GetBufferPointer());
|
||||
|
||||
// Test field equivalents for fields common to V1 and V2.
|
||||
TEST_EQ(v1_root->a(), v2_root->a());
|
||||
|
||||
TEST_EQ(v1_root->b(), v2_root->b());
|
||||
TEST_EQ(v1_root->b()->Get(2), 3);
|
||||
TEST_EQ(v2_root->b()->Get(2), 3);
|
||||
|
||||
// Test that V2 can read the big vector, which V1 doesn't even have
|
||||
// accessors for (i.e. v1_root->big_vector() doesn't exist).
|
||||
TEST_ASSERT(v2_root->big_vector() != nullptr);
|
||||
TEST_EQ(v2_root->big_vector()->size(), giant_data.size());
|
||||
TEST_EQ(v2_root->big_vector()->Get(2), 42);
|
||||
}
|
||||
}
|
||||
|
||||
void Offset64VectorOfStructs() {
|
||||
FlatBufferBuilder64 builder;
|
||||
|
||||
std::vector<LeafStruct> far_leaves;
|
||||
far_leaves.emplace_back(LeafStruct{ 123, 4.567 });
|
||||
far_leaves.emplace_back(LeafStruct{ 987, 6.543 });
|
||||
|
||||
std::vector<LeafStruct> big_leaves;
|
||||
big_leaves.emplace_back(LeafStruct{ 72, 72.8 });
|
||||
big_leaves.emplace_back(LeafStruct{ 82, 82.8 });
|
||||
big_leaves.emplace_back(LeafStruct{ 92, 92.8 });
|
||||
|
||||
// Add the two vectors of leaf structs.
|
||||
const Offset<RootTable> root_table_offset =
|
||||
CreateRootTableDirect(builder, nullptr, 0, nullptr, nullptr, nullptr,
|
||||
nullptr, &far_leaves, &big_leaves);
|
||||
|
||||
// Finish the buffer.
|
||||
builder.Finish(root_table_offset);
|
||||
|
||||
Verifier::Options options;
|
||||
// Allow the verifier to verify 64-bit buffers.
|
||||
options.max_size = FLATBUFFERS_MAX_64_BUFFER_SIZE;
|
||||
options.assert = true;
|
||||
|
||||
Verifier verifier(builder.GetBufferPointer(), builder.GetSize(), options);
|
||||
|
||||
TEST_EQ(VerifyRootTableBuffer(verifier), true);
|
||||
|
||||
// Verify the data.
|
||||
const RootTable *root_table = GetRootTable(builder.GetBufferPointer());
|
||||
TEST_EQ(root_table->far_struct_vector()->size(), far_leaves.size());
|
||||
TEST_EQ(root_table->far_struct_vector()->Get(0)->a(), 123);
|
||||
TEST_EQ(root_table->far_struct_vector()->Get(0)->b(), 4.567);
|
||||
TEST_EQ(root_table->far_struct_vector()->Get(1)->a(), 987);
|
||||
TEST_EQ(root_table->far_struct_vector()->Get(1)->b(), 6.543);
|
||||
|
||||
TEST_EQ(root_table->big_struct_vector()->size(), big_leaves.size());
|
||||
TEST_EQ(root_table->big_struct_vector()->Get(0)->a(), 72);
|
||||
TEST_EQ(root_table->big_struct_vector()->Get(0)->b(), 72.8);
|
||||
TEST_EQ(root_table->big_struct_vector()->Get(1)->a(), 82);
|
||||
TEST_EQ(root_table->big_struct_vector()->Get(1)->b(), 82.8);
|
||||
TEST_EQ(root_table->big_struct_vector()->Get(2)->a(), 92);
|
||||
TEST_EQ(root_table->big_struct_vector()->Get(2)->b(), 92.8);
|
||||
}
|
||||
|
||||
void Offset64SizePrefix() {
|
||||
FlatBufferBuilder64 builder;
|
||||
|
||||
// First serialize a nested buffer.
|
||||
const Offset<String> near_string_offset =
|
||||
builder.CreateString("some near string");
|
||||
|
||||
// Finish by building the root table by passing in all the offsets.
|
||||
const Offset<RootTable> root_table_offset =
|
||||
CreateRootTable(builder, 0, 0, 0, 0, near_string_offset, 0);
|
||||
|
||||
// Finish the buffer.
|
||||
FinishSizePrefixedRootTableBuffer(builder, root_table_offset);
|
||||
|
||||
TEST_EQ(GetPrefixedSize<uoffset64_t>(builder.GetBufferPointer()),
|
||||
builder.GetSize() - sizeof(uoffset64_t));
|
||||
|
||||
Verifier::Options options;
|
||||
// Allow the verifier to verify 64-bit buffers.
|
||||
options.max_size = FLATBUFFERS_MAX_64_BUFFER_SIZE;
|
||||
options.assert = true;
|
||||
|
||||
Verifier verifier(builder.GetBufferPointer(), builder.GetSize(), options);
|
||||
|
||||
TEST_EQ(VerifySizePrefixedRootTableBuffer(verifier), true);
|
||||
|
||||
const RootTable *root_table =
|
||||
GetSizePrefixedRootTable(builder.GetBufferPointer());
|
||||
|
||||
// Verify the fields.
|
||||
TEST_EQ_STR(root_table->near_string()->c_str(), "some near string");
|
||||
}
|
||||
|
||||
void Offset64ManyVectors() {
|
||||
FlatBufferBuilder64 builder;
|
||||
|
||||
// Setup some data to serialize.
|
||||
std::vector<int8_t> data;
|
||||
data.resize(20);
|
||||
data.front() = 42;
|
||||
data.back() = 18;
|
||||
|
||||
const size_t kNumVectors = 20;
|
||||
|
||||
// First serialize all the 64-bit address vectors. We need to store all the
|
||||
// offsets to later add to a wrapper table. We cannot serialize one vector and
|
||||
// then add it to a table immediately, as it would violate the strict ordering
|
||||
// of putting all 64-bit things at the tail of the buffer.
|
||||
std::array<Offset64<Vector<int8_t>>, kNumVectors> offsets_64bit;
|
||||
for (size_t i = 0; i < kNumVectors; ++i) {
|
||||
offsets_64bit[i] = builder.CreateVector64<Vector>(data);
|
||||
}
|
||||
|
||||
// Create some unrelated, 64-bit offset value for later testing.
|
||||
const Offset64<String> far_string_offset =
|
||||
builder.CreateString<Offset64>("some far string");
|
||||
|
||||
// Now place all the offsets into their own wrapper tables. Again, we have to
|
||||
// store the offsets before we can add them to the root table vector.
|
||||
std::array<Offset<WrapperTable>, kNumVectors> offsets_wrapper;
|
||||
for (size_t i = 0; i < kNumVectors; ++i) {
|
||||
offsets_wrapper[i] = CreateWrapperTable(builder, offsets_64bit[i]);
|
||||
}
|
||||
|
||||
// Now create the 32-bit vector that is stored in the root table.
|
||||
// TODO(derekbailey): the array type wasn't auto deduced, see if that could be
|
||||
// fixed.
|
||||
const Offset<Vector<Offset<WrapperTable>>> many_vectors_offset =
|
||||
builder.CreateVector<Offset<WrapperTable>>(offsets_wrapper);
|
||||
|
||||
// Finish by building using the root table builder, to exercise a different
|
||||
// code path than the other tests.
|
||||
RootTableBuilder root_table_builder(builder);
|
||||
root_table_builder.add_many_vectors(many_vectors_offset);
|
||||
root_table_builder.add_far_string(far_string_offset);
|
||||
const Offset<RootTable> root_table_offset = root_table_builder.Finish();
|
||||
|
||||
// Finish the buffer.
|
||||
FinishRootTableBuffer(builder, root_table_offset);
|
||||
|
||||
Verifier::Options options;
|
||||
// Allow the verifier to verify 64-bit buffers.
|
||||
options.max_size = FLATBUFFERS_MAX_64_BUFFER_SIZE;
|
||||
options.assert = true;
|
||||
|
||||
Verifier verifier(builder.GetBufferPointer(), builder.GetSize(), options);
|
||||
|
||||
TEST_EQ(VerifyRootTableBuffer(verifier), true);
|
||||
|
||||
const RootTable *root_table = GetRootTable(builder.GetBufferPointer());
|
||||
|
||||
// Verify the fields.
|
||||
TEST_EQ_STR(root_table->far_string()->c_str(), "some far string");
|
||||
TEST_EQ(root_table->many_vectors()->size(), kNumVectors);
|
||||
|
||||
// Spot check one of the vectors.
|
||||
TEST_EQ(root_table->many_vectors()->Get(12)->vector()->size(), 20);
|
||||
TEST_EQ(root_table->many_vectors()->Get(12)->vector()->Get(0), 42);
|
||||
TEST_EQ(root_table->many_vectors()->Get(12)->vector()->Get(19), 18);
|
||||
}
|
||||
|
||||
} // namespace tests
|
||||
} // namespace flatbuffers
19
tests/64bit/offset64_test.h
Normal file
@@ -0,0 +1,19 @@
#ifndef TESTS_64BIT_OFFSET64_TEST_H
#define TESTS_64BIT_OFFSET64_TEST_H

namespace flatbuffers {
namespace tests {

void Offset64Test();
void Offset64SerializedFirst();
void Offset64NestedFlatBuffer();
void Offset64CreateDirect();
void Offset64Evolution();
void Offset64VectorOfStructs();
void Offset64SizePrefix();
void Offset64ManyVectors();

} // namespace tests
} // namespace flatbuffers

#endif // TESTS_64BIT_OFFSET64_TEST_H
74
tests/64bit/test_64bit.afb
Normal file
@@ -0,0 +1,74 @@
// Annotated Flatbuffer Binary
//
// Schema file: tests/64bit/test_64bit.fbs
// Binary file: tests/64bit/test_64bit.bin

header:
+0x00 | 1C 00 00 00 | UOffset32 | 0x0000001C (28) Loc: 0x1C | offset to root table `RootTable`

padding:
+0x04 | 00 00 00 00 | uint8_t[4] | .... | padding

vtable (RootTable):
+0x08 | 14 00 | uint16_t | 0x0014 (20) | size of this vtable
+0x0A | 34 00 | uint16_t | 0x0034 (52) | size of referring table
+0x0C | 04 00 | VOffset16 | 0x0004 (4) | offset to field `far_vector` (id: 0)
+0x0E | 10 00 | VOffset16 | 0x0010 (16) | offset to field `a` (id: 1)
+0x10 | 14 00 | VOffset16 | 0x0014 (20) | offset to field `far_string` (id: 2)
+0x12 | 24 00 | VOffset16 | 0x0024 (36) | offset to field `big_vector` (id: 3)
+0x14 | 20 00 | VOffset16 | 0x0020 (32) | offset to field `near_string` (id: 4)
+0x16 | 00 00 | VOffset16 | 0x0000 (0) | offset to field `nested_root` (id: 5) <null> (Vector64)
+0x18 | 00 00 | VOffset16 | 0x0000 (0) | offset to field `far_struct_vector` (id: 6) <null> (Vector)
+0x1A | 2C 00 | VOffset16 | 0x002C (44) | offset to field `big_struct_vector` (id: 7)

root_table (RootTable):
+0x1C | 14 00 00 00 | SOffset32 | 0x00000014 (20) Loc: 0x08 | offset to vtable
+0x20 | D0 00 00 00 00 00 00 00 | UOffset64 | 0x00000000000000D0 (208) Loc: 0xF0 | offset to field `far_vector` (vector)
+0x28 | 00 00 00 00 | uint8_t[4] | .... | padding
+0x2C | D2 04 00 00 | uint32_t | 0x000004D2 (1234) | table field `a` (Int)
+0x30 | 8C 00 00 00 00 00 00 00 | UOffset64 | 0x000000000000008C (140) Loc: 0xBC | offset to field `far_string` (string)
+0x38 | 00 00 00 00 | uint8_t[4] | .... | padding
+0x3C | 40 00 00 00 | UOffset32 | 0x00000040 (64) Loc: 0x7C | offset to field `near_string` (string)
+0x40 | 70 00 00 00 00 00 00 00 | UOffset64 | 0x0000000000000070 (112) Loc: 0xB0 | offset to field `big_vector` (vector64)
+0x48 | 08 00 00 00 00 00 00 00 | UOffset64 | 0x0000000000000008 (8) Loc: 0x50 | offset to field `big_struct_vector` (vector64)

vector64 (RootTable.big_struct_vector):
+0x50 | 02 00 00 00 00 00 00 00 | uint64_t | 0x0000000000000002 (2) | length of vector (# items)
+0x58 | 0C 00 00 00 | uint32_t | 0x0000000C (12) | struct field `[0].a` of 'LeafStruct' (Int)
<4 regions omitted>
+0x70 | 33 33 33 33 33 33 22 40 | double | 0x4022333333333333 (9.1) | struct field `[1].b` of 'LeafStruct' (Double)

padding:
+0x78 | 00 00 00 00 | uint8_t[4] | .... | padding

string (RootTable.near_string):
+0x7C | 2F 00 00 00 | uint32_t | 0x0000002F (47) | length of string
+0x80 | 74 68 69 73 20 69 73 20 | char[47] | this is | string literal
+0x88 | 61 20 6E 65 61 72 20 73 | | a near s
+0x90 | 74 72 69 6E 67 20 77 68 | | tring wh
+0x98 | 69 63 68 20 68 61 73 20 | | ich has
+0xA0 | 61 20 33 32 2D 62 69 74 | | a 32-bit
+0xA8 | 20 6F 66 66 73 65 74 | | offset
+0xAF | 00 | char | 0x00 (0) | string terminator

vector64 (RootTable.big_vector):
+0xB0 | 04 00 00 00 00 00 00 00 | uint64_t | 0x0000000000000004 (4) | length of vector (# items)
+0xB8 | 05 | uint8_t | 0x05 (5) | value[0]
<2 regions omitted>
+0xBB | 08 | uint8_t | 0x08 (8) | value[3]

string (RootTable.far_string):
+0xBC | 2E 00 00 00 | uint32_t | 0x0000002E (46) | length of string
+0xC0 | 74 68 69 73 20 69 73 20 | char[46] | this is | string literal
+0xC8 | 61 20 66 61 72 20 73 74 | | a far st
+0xD0 | 72 69 6E 67 20 77 68 69 | | ring whi
+0xD8 | 63 68 20 68 61 73 20 61 | | ch has a
+0xE0 | 20 36 34 2D 62 69 74 20 | | 64-bit
+0xE8 | 6F 66 66 73 65 74 | | offset
+0xEE | 00 | char | 0x00 (0) | string terminator

vector (RootTable.far_vector):
+0xF0 | 03 00 00 00 | uint32_t | 0x00000003 (3) | length of vector (# items)
+0xF4 | 01 | uint8_t | 0x01 (1) | value[0]
+0xF5 | 02 | uint8_t | 0x02 (2) | value[1]
+0xF6 | 03 | uint8_t | 0x03 (3) | value[2]
BIN
tests/64bit/test_64bit.bfbs
Normal file
Binary file not shown.
BIN
tests/64bit/test_64bit.bin
Normal file
Binary file not shown.
49
tests/64bit/test_64bit.fbs
Normal file
@@ -0,0 +1,49 @@
struct LeafStruct {
  a:int;
  b:double;
}

table WrapperTable {
  // A normal 32-bit sized vector that could be very far away (64-bit address).
  vector:[int8] (offset64);
}

table RootTable {
  // A normal 32-bit sized vector, that could be very far away (64-bit address).
  far_vector:[ubyte] (offset64);

  // An in-place value just to check that vtable offsets are correct.
  a:int;

  // A normal 32-bit sized string, that could be very far away (64-bit address).
  far_string:string (offset64);

  // A big 64-bit sized vector, that could be very far away (64-bit address).
  big_vector:[ubyte] (vector64);

  // A normal 32-bit sized string that is not far away (32-bit address).
  near_string:string;

  // A big 64-bit sized vector that is a nested flatbuffer (64-bit address).
  nested_root:[ubyte] (vector64, nested_flatbuffer: "RootTable");

  // A normal 32-bit sized vector of structs, that could be very far away
  // (64-bit address).
  far_struct_vector:[LeafStruct] (offset64);

  // A big 64-bit sized vector of structs that could be very far away
  // (64-bit address).
  big_struct_vector:[LeafStruct] (vector64);

  // A normal 32-bit sized vector of tables. Currently 64-bit vectors don't
  // support tables, as that would require serializing a table (32-bit) before
  // the vector (64-bit), which is not allowed.
  //
  // This demonstrates how you could have many vectors in the buffer, by
  // effectively having a vector of 64-bit vectors. The IDL doesn't support
  // nested vectors (e.g. [[type]]), so going through a wrapper table allows
  // this.
  many_vectors:[WrapperTable];
}

root_type RootTable;
17
tests/64bit/test_64bit.json
Normal file
@@ -0,0 +1,17 @@
{
  "far_vector": [1, 2, 3],
  "a": 1234,
  "far_string": "this is a far string which has a 64-bit offset",
  "big_vector": [5, 6, 7, 8],
  "near_string": "this is a near string which has a 32-bit offset",
  "big_struct_vector": [
    {
      "a": 12,
      "b": 3.456
    },
    {
      "a": 78,
      "b": 9.10
    }
  ]
}
93
tests/64bit/test_64bit_bfbs_generated.h
Normal file
@@ -0,0 +1,93 @@
// automatically generated by the FlatBuffers compiler, do not modify
|
||||
|
||||
|
||||
#ifndef FLATBUFFERS_GENERATED_TEST64BIT_BFBS_H_
|
||||
#define FLATBUFFERS_GENERATED_TEST64BIT_BFBS_H_
|
||||
|
||||
#include "flatbuffers/flatbuffers.h"
|
||||
|
||||
// Ensure the included flatbuffers.h is the same version as when this file was
|
||||
// generated, otherwise it may not be compatible.
|
||||
static_assert(FLATBUFFERS_VERSION_MAJOR == 23 &&
|
||||
FLATBUFFERS_VERSION_MINOR == 5 &&
|
||||
FLATBUFFERS_VERSION_REVISION == 8,
|
||||
"Non-compatible flatbuffers version included");
|
||||
|
||||
struct RootTableBinarySchema {
|
||||
static const uint8_t *data() {
|
||||
// Buffer containing the binary schema.
|
||||
static const uint8_t bfbsData[1180] = {
|
||||
0x1C,0x00,0x00,0x00,0x42,0x46,0x42,0x53,0x14,0x00,0x20,0x00,0x04,0x00,0x08,0x00,0x0C,0x00,0x10,0x00,
|
||||
0x14,0x00,0x18,0x00,0x00,0x00,0x1C,0x00,0x14,0x00,0x00,0x00,0x34,0x00,0x00,0x00,0x2C,0x00,0x00,0x00,
|
||||
0x20,0x00,0x00,0x00,0x14,0x00,0x00,0x00,0x54,0x00,0x00,0x00,0x08,0x00,0x00,0x00,0x2C,0x00,0x00,0x00,
|
||||
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
|
||||
0x00,0x00,0x00,0x00,0x03,0x00,0x00,0x00,0x60,0x03,0x00,0x00,0x28,0x00,0x00,0x00,0xBC,0x02,0x00,0x00,
|
||||
0x01,0x00,0x00,0x00,0x0C,0x00,0x00,0x00,0x08,0x00,0x0C,0x00,0x04,0x00,0x08,0x00,0x08,0x00,0x00,0x00,
|
||||
0x5C,0x03,0x00,0x00,0x04,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x7C,0xFD,0xFF,0xFF,0x38,0x00,0x00,0x00,
|
||||
0x0C,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x40,0x03,0x00,0x00,0x09,0x00,0x00,0x00,0x1C,0x02,0x00,0x00,
|
||||
0x68,0x00,0x00,0x00,0x80,0x01,0x00,0x00,0xE0,0x01,0x00,0x00,0xAC,0x00,0x00,0x00,0x2C,0x02,0x00,0x00,
|
||||
0x1C,0x00,0x00,0x00,0x3C,0x01,0x00,0x00,0xE8,0x00,0x00,0x00,0x09,0x00,0x00,0x00,0x52,0x6F,0x6F,0x74,
|
||||
0x54,0x61,0x62,0x6C,0x65,0x00,0x00,0x00,0xF8,0xFE,0xFF,0xFF,0x00,0x00,0x00,0x01,0x08,0x00,0x14,0x00,
|
||||
0x18,0x00,0x00,0x00,0x04,0x00,0x00,0x00,0x78,0xFF,0xFF,0xFF,0x00,0x00,0x0E,0x0F,0x02,0x00,0x00,0x00,
|
||||
0x04,0x00,0x00,0x00,0x0C,0x00,0x00,0x00,0x6D,0x61,0x6E,0x79,0x5F,0x76,0x65,0x63,0x74,0x6F,0x72,0x73,
|
||||
0x00,0x00,0x00,0x00,0xA0,0xFE,0xFF,0xFF,0x00,0x00,0x01,0x01,0x07,0x00,0x12,0x00,0x2C,0x00,0x00,0x00,
|
||||
0x14,0x00,0x00,0x00,0x10,0x00,0x14,0x00,0x06,0x00,0x07,0x00,0x08,0x00,0x00,0x00,0x0C,0x00,0x10,0x00,
|
||||
0x10,0x00,0x00,0x00,0x00,0x00,0x12,0x0F,0x00,0x00,0x00,0x00,0x08,0x00,0x00,0x00,0x04,0x00,0x00,0x00,
|
||||
0x11,0x00,0x00,0x00,0x62,0x69,0x67,0x5F,0x73,0x74,0x72,0x75,0x63,0x74,0x5F,0x76,0x65,0x63,0x74,0x6F,
|
||||
0x72,0x00,0x00,0x00,0xF0,0xFE,0xFF,0xFF,0x00,0x00,0x01,0x01,0x06,0x00,0x10,0x00,0x28,0x00,0x00,0x00,
|
||||
0x14,0x00,0x00,0x00,0x10,0x00,0x10,0x00,0x06,0x00,0x07,0x00,0x08,0x00,0x00,0x00,0x00,0x00,0x0C,0x00,
|
||||
0x10,0x00,0x00,0x00,0x00,0x00,0x0E,0x0F,0x00,0x00,0x00,0x00,0x10,0x00,0x00,0x00,0x11,0x00,0x00,0x00,
|
||||
0x66,0x61,0x72,0x5F,0x73,0x74,0x72,0x75,0x63,0x74,0x5F,0x76,0x65,0x63,0x74,0x6F,0x72,0x00,0x00,0x00,
|
||||
0x3C,0xFF,0xFF,0xFF,0x00,0x00,0x01,0x01,0x05,0x00,0x0E,0x00,0x18,0x00,0x00,0x00,0x04,0x00,0x00,0x00,
|
||||
0x80,0xFF,0xFF,0xFF,0x00,0x00,0x12,0x04,0x08,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x0B,0x00,0x00,0x00,
|
||||
0x6E,0x65,0x73,0x74,0x65,0x64,0x5F,0x72,0x6F,0x6F,0x74,0x00,0x1C,0x00,0x14,0x00,0x0C,0x00,0x10,0x00,
|
||||
0x08,0x00,0x0A,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x07,0x00,
|
||||
0x1C,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x04,0x00,0x0C,0x00,0x14,0x00,0x00,0x00,0x04,0x00,0x00,0x00,
|
||||
0x90,0xFD,0xFF,0xFF,0x00,0x00,0x00,0x0D,0x01,0x00,0x00,0x00,0x0B,0x00,0x00,0x00,0x6E,0x65,0x61,0x72,
|
||||
0x5F,0x73,0x74,0x72,0x69,0x6E,0x67,0x00,0xBC,0xFF,0xFF,0xFF,0x00,0x00,0x01,0x01,0x03,0x00,0x0A,0x00,
|
||||
0x28,0x00,0x00,0x00,0x14,0x00,0x00,0x00,0x10,0x00,0x10,0x00,0x06,0x00,0x07,0x00,0x00,0x00,0x00,0x00,
|
||||
0x08,0x00,0x0C,0x00,0x10,0x00,0x00,0x00,0x00,0x00,0x12,0x04,0x08,0x00,0x00,0x00,0x01,0x00,0x00,0x00,
|
||||
0x0A,0x00,0x00,0x00,0x62,0x69,0x67,0x5F,0x76,0x65,0x63,0x74,0x6F,0x72,0x00,0x00,0x20,0x00,0x14,0x00,
|
||||
0x0C,0x00,0x10,0x00,0x08,0x00,0x0A,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
|
||||
0x00,0x00,0x06,0x00,0x00,0x00,0x07,0x00,0x20,0x00,0x00,0x00,0x00,0x00,0x01,0x01,0x02,0x00,0x08,0x00,
|
||||
0x14,0x00,0x00,0x00,0x04,0x00,0x00,0x00,0x24,0xFE,0xFF,0xFF,0x00,0x00,0x00,0x0D,0x01,0x00,0x00,0x00,
|
||||
0x0A,0x00,0x00,0x00,0x66,0x61,0x72,0x5F,0x73,0x74,0x72,0x69,0x6E,0x67,0x00,0x00,0xB0,0xFE,0xFF,0xFF,
|
||||
0x01,0x00,0x06,0x00,0x14,0x00,0x00,0x00,0x04,0x00,0x00,0x00,0x50,0xFE,0xFF,0xFF,0x00,0x00,0x00,0x07,
|
||||
0x01,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x61,0x00,0x00,0x00,0x90,0xFF,0xFF,0xFF,0x01,0x01,0x04,0x00,
|
||||
0x14,0x00,0x00,0x00,0x04,0x00,0x00,0x00,0x70,0xFF,0xFF,0xFF,0x00,0x00,0x0E,0x04,0x01,0x00,0x00,0x00,
|
||||
0x0A,0x00,0x00,0x00,0x66,0x61,0x72,0x5F,0x76,0x65,0x63,0x74,0x6F,0x72,0x00,0x00,0x14,0x00,0x14,0x00,
|
||||
0x04,0x00,0x08,0x00,0x00,0x00,0x0C,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x10,0x00,0x14,0x00,0x00,0x00,
|
||||
0x18,0x00,0x00,0x00,0x0C,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0xA8,0x00,0x00,0x00,0x01,0x00,0x00,0x00,
|
||||
0x38,0x00,0x00,0x00,0x0C,0x00,0x00,0x00,0x57,0x72,0x61,0x70,0x70,0x65,0x72,0x54,0x61,0x62,0x6C,0x65,
|
||||
0x00,0x00,0x00,0x00,0x20,0x00,0x10,0x00,0x08,0x00,0x0C,0x00,0x00,0x00,0x06,0x00,0x00,0x00,0x00,0x00,
|
||||
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x04,0x00,0x00,0x00,0x05,0x00,0x20,0x00,0x00,0x00,
|
||||
0x01,0x01,0x04,0x00,0x24,0x00,0x00,0x00,0x14,0x00,0x00,0x00,0x10,0x00,0x0C,0x00,0x06,0x00,0x07,0x00,
|
||||
0x00,0x00,0x00,0x00,0x00,0x00,0x08,0x00,0x10,0x00,0x00,0x00,0x00,0x00,0x0E,0x03,0x01,0x00,0x00,0x00,
|
||||
0x06,0x00,0x00,0x00,0x76,0x65,0x63,0x74,0x6F,0x72,0x00,0x00,0x14,0x00,0x1C,0x00,0x08,0x00,0x0C,0x00,
|
||||
0x07,0x00,0x10,0x00,0x14,0x00,0x00,0x00,0x00,0x00,0x18,0x00,0x14,0x00,0x00,0x00,0x00,0x00,0x00,0x01,
|
||||
0x38,0x00,0x00,0x00,0x28,0x00,0x00,0x00,0x08,0x00,0x00,0x00,0x10,0x00,0x00,0x00,0x04,0x00,0x00,0x00,
|
||||
0x10,0x00,0x00,0x00,0x2F,0x2F,0x74,0x65,0x73,0x74,0x5F,0x36,0x34,0x62,0x69,0x74,0x2E,0x66,0x62,0x73,
|
||||
0x00,0x00,0x00,0x00,0x02,0x00,0x00,0x00,0x78,0x00,0x00,0x00,0x20,0x00,0x00,0x00,0x0A,0x00,0x00,0x00,
|
||||
0x4C,0x65,0x61,0x66,0x53,0x74,0x72,0x75,0x63,0x74,0x00,0x00,0x0C,0x00,0x10,0x00,0x08,0x00,0x0C,0x00,
|
||||
0x04,0x00,0x06,0x00,0x0C,0x00,0x00,0x00,0x01,0x00,0x08,0x00,0x28,0x00,0x00,0x00,0x14,0x00,0x00,0x00,
|
||||
0x10,0x00,0x10,0x00,0x07,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x08,0x00,0x0C,0x00,0x10,0x00,0x00,0x00,
|
||||
0x00,0x00,0x00,0x0C,0x08,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x62,0x00,0x1E,0x00,
|
||||
0x10,0x00,0x08,0x00,0x0C,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
|
||||
0x00,0x00,0x00,0x00,0x00,0x00,0x06,0x00,0x1E,0x00,0x00,0x00,0x00,0x00,0x04,0x00,0x24,0x00,0x00,0x00,
|
||||
0x14,0x00,0x00,0x00,0x10,0x00,0x0C,0x00,0x07,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x08,0x00,
|
||||
0x10,0x00,0x00,0x00,0x00,0x00,0x00,0x07,0x01,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x61,0x00,0x00,0x00
|
||||
};
|
||||
return bfbsData;
|
||||
}
|
||||
static size_t size() {
|
||||
return 1180;
|
||||
}
|
||||
const uint8_t *begin() {
|
||||
return data();
|
||||
}
|
||||
const uint8_t *end() {
|
||||
return data() + size();
|
||||
}
|
||||
};
|
||||
|
||||
#endif // FLATBUFFERS_GENERATED_TEST64BIT_BFBS_H_
625
tests/64bit/test_64bit_generated.h
Normal file
@@ -0,0 +1,625 @@
// automatically generated by the FlatBuffers compiler, do not modify
|
||||
|
||||
|
||||
#ifndef FLATBUFFERS_GENERATED_TEST64BIT_H_
|
||||
#define FLATBUFFERS_GENERATED_TEST64BIT_H_
|
||||
|
||||
#include "flatbuffers/flatbuffers.h"
|
||||
|
||||
// Ensure the included flatbuffers.h is the same version as when this file was
|
||||
// generated, otherwise it may not be compatible.
|
||||
static_assert(FLATBUFFERS_VERSION_MAJOR == 23 &&
|
||||
FLATBUFFERS_VERSION_MINOR == 5 &&
|
||||
FLATBUFFERS_VERSION_REVISION == 8,
|
||||
"Non-compatible flatbuffers version included");
|
||||
|
||||
// For access to the binary schema that produced this file.
|
||||
#include "test_64bit_bfbs_generated.h"
|
||||
|
||||
struct LeafStruct;
|
||||
|
||||
struct WrapperTable;
|
||||
struct WrapperTableBuilder;
|
||||
struct WrapperTableT;
|
||||
|
||||
struct RootTable;
|
||||
struct RootTableBuilder;
|
||||
struct RootTableT;
|
||||
|
||||
bool operator==(const LeafStruct &lhs, const LeafStruct &rhs);
|
||||
bool operator!=(const LeafStruct &lhs, const LeafStruct &rhs);
|
||||
bool operator==(const WrapperTableT &lhs, const WrapperTableT &rhs);
|
||||
bool operator!=(const WrapperTableT &lhs, const WrapperTableT &rhs);
|
||||
bool operator==(const RootTableT &lhs, const RootTableT &rhs);
|
||||
bool operator!=(const RootTableT &lhs, const RootTableT &rhs);
|
||||
|
||||
inline const ::flatbuffers::TypeTable *LeafStructTypeTable();
|
||||
|
||||
inline const ::flatbuffers::TypeTable *WrapperTableTypeTable();
|
||||
|
||||
inline const ::flatbuffers::TypeTable *RootTableTypeTable();
|
||||
|
||||
FLATBUFFERS_MANUALLY_ALIGNED_STRUCT(8) LeafStruct FLATBUFFERS_FINAL_CLASS {
|
||||
private:
|
||||
int32_t a_;
|
||||
int32_t padding0__;
|
||||
double b_;
|
||||
|
||||
public:
|
||||
static const ::flatbuffers::TypeTable *MiniReflectTypeTable() {
|
||||
return LeafStructTypeTable();
|
||||
}
|
||||
LeafStruct()
|
||||
: a_(0),
|
||||
padding0__(0),
|
||||
b_(0) {
|
||||
(void)padding0__;
|
||||
}
|
||||
LeafStruct(int32_t _a, double _b)
|
||||
: a_(::flatbuffers::EndianScalar(_a)),
|
||||
padding0__(0),
|
||||
b_(::flatbuffers::EndianScalar(_b)) {
|
||||
(void)padding0__;
|
||||
}
|
||||
int32_t a() const {
|
||||
return ::flatbuffers::EndianScalar(a_);
|
||||
}
|
||||
void mutate_a(int32_t _a) {
|
||||
::flatbuffers::WriteScalar(&a_, _a);
|
||||
}
|
||||
double b() const {
|
||||
return ::flatbuffers::EndianScalar(b_);
|
||||
}
|
||||
void mutate_b(double _b) {
|
||||
::flatbuffers::WriteScalar(&b_, _b);
|
||||
}
|
||||
};
|
||||
FLATBUFFERS_STRUCT_END(LeafStruct, 16);
|
||||
|
||||
inline bool operator==(const LeafStruct &lhs, const LeafStruct &rhs) {
|
||||
return
|
||||
(lhs.a() == rhs.a()) &&
|
||||
(lhs.b() == rhs.b());
|
||||
}
|
||||
|
||||
inline bool operator!=(const LeafStruct &lhs, const LeafStruct &rhs) {
|
||||
return !(lhs == rhs);
|
||||
}
|
||||
|
||||
|
||||
struct WrapperTableT : public ::flatbuffers::NativeTable {
|
||||
typedef WrapperTable TableType;
|
||||
std::vector<int8_t> vector{};
|
||||
};
|
||||
|
||||
struct WrapperTable FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
|
||||
typedef WrapperTableT NativeTableType;
|
||||
typedef WrapperTableBuilder Builder;
|
||||
typedef RootTableBinarySchema BinarySchema;
|
||||
static const ::flatbuffers::TypeTable *MiniReflectTypeTable() {
|
||||
return WrapperTableTypeTable();
|
||||
}
|
||||
enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
|
||||
VT_VECTOR = 4
|
||||
};
|
||||
const ::flatbuffers::Vector<int8_t> *vector() const {
|
||||
return GetPointer64<const ::flatbuffers::Vector<int8_t> *>(VT_VECTOR);
|
||||
}
|
||||
::flatbuffers::Vector<int8_t> *mutable_vector() {
|
||||
return GetPointer64<::flatbuffers::Vector<int8_t> *>(VT_VECTOR);
|
||||
}
|
||||
bool Verify(::flatbuffers::Verifier &verifier) const {
|
||||
return VerifyTableStart(verifier) &&
|
||||
VerifyOffset64(verifier, VT_VECTOR) &&
|
||||
verifier.VerifyVector(vector()) &&
|
||||
verifier.EndTable();
|
||||
}
|
||||
WrapperTableT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
|
||||
void UnPackTo(WrapperTableT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
|
||||
static ::flatbuffers::Offset<WrapperTable> Pack(::flatbuffers::FlatBufferBuilder64 &_fbb, const WrapperTableT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
|
||||
};
|
||||
|
||||
struct WrapperTableBuilder {
|
||||
typedef WrapperTable Table;
|
||||
::flatbuffers::FlatBufferBuilder64 &fbb_;
|
||||
::flatbuffers::uoffset_t start_;
|
||||
void add_vector(::flatbuffers::Offset64<::flatbuffers::Vector<int8_t>> vector) {
|
||||
fbb_.AddOffset(WrapperTable::VT_VECTOR, vector);
|
||||
}
|
||||
explicit WrapperTableBuilder(::flatbuffers::FlatBufferBuilder64 &_fbb)
|
||||
: fbb_(_fbb) {
|
||||
start_ = fbb_.StartTable();
|
||||
}
|
||||
::flatbuffers::Offset<WrapperTable> Finish() {
|
||||
const auto end = fbb_.EndTable(start_);
|
||||
auto o = ::flatbuffers::Offset<WrapperTable>(end);
|
||||
return o;
|
||||
}
|
||||
};
|
||||
|
||||
inline ::flatbuffers::Offset<WrapperTable> CreateWrapperTable(
|
||||
::flatbuffers::FlatBufferBuilder64 &_fbb,
|
||||
::flatbuffers::Offset64<::flatbuffers::Vector<int8_t>> vector = 0) {
|
||||
WrapperTableBuilder builder_(_fbb);
|
||||
builder_.add_vector(vector);
|
||||
return builder_.Finish();
|
||||
}
|
||||
|
||||
inline ::flatbuffers::Offset<WrapperTable> CreateWrapperTableDirect(
|
||||
::flatbuffers::FlatBufferBuilder64 &_fbb,
|
||||
const std::vector<int8_t> *vector = nullptr) {
|
||||
auto vector__ = vector ? _fbb.CreateVector64<::flatbuffers::Vector>(*vector) : 0;
|
||||
return CreateWrapperTable(
|
||||
_fbb,
|
||||
vector__);
|
||||
}
|
||||
|
||||
::flatbuffers::Offset<WrapperTable> CreateWrapperTable(::flatbuffers::FlatBufferBuilder64 &_fbb, const WrapperTableT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
|
||||
|
||||
struct RootTableT : public ::flatbuffers::NativeTable {
|
||||
typedef RootTable TableType;
|
||||
std::vector<uint8_t> far_vector{};
|
||||
int32_t a = 0;
|
||||
std::string far_string{};
|
||||
std::vector<uint8_t> big_vector{};
|
||||
std::string near_string{};
|
||||
std::vector<uint8_t> nested_root{};
|
||||
std::vector<LeafStruct> far_struct_vector{};
|
||||
std::vector<LeafStruct> big_struct_vector{};
|
||||
std::vector<std::unique_ptr<WrapperTableT>> many_vectors{};
|
||||
RootTableT() = default;
|
||||
RootTableT(const RootTableT &o);
|
||||
RootTableT(RootTableT&&) FLATBUFFERS_NOEXCEPT = default;
|
||||
RootTableT &operator=(RootTableT o) FLATBUFFERS_NOEXCEPT;
|
||||
};
|
||||
|
||||
struct RootTable FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
|
||||
typedef RootTableT NativeTableType;
|
||||
typedef RootTableBuilder Builder;
|
||||
typedef RootTableBinarySchema BinarySchema;
|
||||
static const ::flatbuffers::TypeTable *MiniReflectTypeTable() {
|
||||
return RootTableTypeTable();
|
||||
}
|
||||
enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
|
||||
VT_FAR_VECTOR = 4,
|
||||
VT_A = 6,
|
||||
VT_FAR_STRING = 8,
|
||||
VT_BIG_VECTOR = 10,
|
||||
VT_NEAR_STRING = 12,
|
||||
VT_NESTED_ROOT = 14,
|
||||
VT_FAR_STRUCT_VECTOR = 16,
|
||||
VT_BIG_STRUCT_VECTOR = 18,
|
||||
VT_MANY_VECTORS = 20
|
||||
};
|
||||
const ::flatbuffers::Vector<uint8_t> *far_vector() const {
|
||||
return GetPointer64<const ::flatbuffers::Vector<uint8_t> *>(VT_FAR_VECTOR);
|
||||
}
|
||||
::flatbuffers::Vector<uint8_t> *mutable_far_vector() {
|
||||
return GetPointer64<::flatbuffers::Vector<uint8_t> *>(VT_FAR_VECTOR);
|
||||
}
|
||||
int32_t a() const {
|
||||
return GetField<int32_t>(VT_A, 0);
|
||||
}
|
||||
bool mutate_a(int32_t _a = 0) {
|
||||
return SetField<int32_t>(VT_A, _a, 0);
|
||||
}
|
||||
const ::flatbuffers::String *far_string() const {
|
||||
return GetPointer64<const ::flatbuffers::String *>(VT_FAR_STRING);
|
||||
}
|
||||
::flatbuffers::String *mutable_far_string() {
|
||||
return GetPointer64<::flatbuffers::String *>(VT_FAR_STRING);
|
||||
}
|
||||
const ::flatbuffers::Vector64<uint8_t> *big_vector() const {
|
||||
return GetPointer64<const ::flatbuffers::Vector64<uint8_t> *>(VT_BIG_VECTOR);
|
||||
}
|
||||
::flatbuffers::Vector64<uint8_t> *mutable_big_vector() {
|
||||
return GetPointer64<::flatbuffers::Vector64<uint8_t> *>(VT_BIG_VECTOR);
|
||||
}
|
||||
const ::flatbuffers::String *near_string() const {
|
||||
return GetPointer<const ::flatbuffers::String *>(VT_NEAR_STRING);
|
||||
}
|
||||
::flatbuffers::String *mutable_near_string() {
|
||||
return GetPointer<::flatbuffers::String *>(VT_NEAR_STRING);
|
||||
}
|
||||
const ::flatbuffers::Vector64<uint8_t> *nested_root() const {
|
||||
return GetPointer64<const ::flatbuffers::Vector64<uint8_t> *>(VT_NESTED_ROOT);
|
||||
}
|
||||
::flatbuffers::Vector64<uint8_t> *mutable_nested_root() {
|
||||
return GetPointer64<::flatbuffers::Vector64<uint8_t> *>(VT_NESTED_ROOT);
|
||||
}
|
||||
const RootTable *nested_root_nested_root() const {
|
||||
const auto _f = nested_root();
|
||||
return _f ? ::flatbuffers::GetRoot<RootTable>(_f->Data())
|
||||
: nullptr;
|
||||
}
|
||||
const ::flatbuffers::Vector<const LeafStruct *> *far_struct_vector() const {
|
||||
return GetPointer64<const ::flatbuffers::Vector<const LeafStruct *> *>(VT_FAR_STRUCT_VECTOR);
|
||||
}
|
||||
::flatbuffers::Vector<const LeafStruct *> *mutable_far_struct_vector() {
|
||||
return GetPointer64<::flatbuffers::Vector<const LeafStruct *> *>(VT_FAR_STRUCT_VECTOR);
|
||||
}
|
||||
const ::flatbuffers::Vector64<const LeafStruct *> *big_struct_vector() const {
|
||||
return GetPointer64<const ::flatbuffers::Vector64<const LeafStruct *> *>(VT_BIG_STRUCT_VECTOR);
|
||||
}
|
||||
::flatbuffers::Vector64<const LeafStruct *> *mutable_big_struct_vector() {
|
||||
return GetPointer64<::flatbuffers::Vector64<const LeafStruct *> *>(VT_BIG_STRUCT_VECTOR);
|
||||
}
|
||||
const ::flatbuffers::Vector<::flatbuffers::Offset<WrapperTable>> *many_vectors() const {
|
||||
return GetPointer<const ::flatbuffers::Vector<::flatbuffers::Offset<WrapperTable>> *>(VT_MANY_VECTORS);
|
||||
}
|
||||
::flatbuffers::Vector<::flatbuffers::Offset<WrapperTable>> *mutable_many_vectors() {
|
||||
return GetPointer<::flatbuffers::Vector<::flatbuffers::Offset<WrapperTable>> *>(VT_MANY_VECTORS);
|
||||
}
|
||||
bool Verify(::flatbuffers::Verifier &verifier) const {
|
||||
return VerifyTableStart(verifier) &&
|
||||
VerifyOffset64(verifier, VT_FAR_VECTOR) &&
|
||||
verifier.VerifyVector(far_vector()) &&
|
||||
VerifyField<int32_t>(verifier, VT_A, 4) &&
|
||||
VerifyOffset64(verifier, VT_FAR_STRING) &&
|
||||
verifier.VerifyString(far_string()) &&
|
||||
VerifyOffset64(verifier, VT_BIG_VECTOR) &&
|
||||
verifier.VerifyVector(big_vector()) &&
|
||||
VerifyOffset(verifier, VT_NEAR_STRING) &&
|
||||
verifier.VerifyString(near_string()) &&
|
||||
VerifyOffset64(verifier, VT_NESTED_ROOT) &&
|
||||
verifier.VerifyVector(nested_root()) &&
|
||||
verifier.VerifyNestedFlatBuffer<RootTable>(nested_root(), nullptr) &&
|
||||
VerifyOffset64(verifier, VT_FAR_STRUCT_VECTOR) &&
|
||||
verifier.VerifyVector(far_struct_vector()) &&
|
||||
VerifyOffset64(verifier, VT_BIG_STRUCT_VECTOR) &&
|
||||
verifier.VerifyVector(big_struct_vector()) &&
|
||||
VerifyOffset(verifier, VT_MANY_VECTORS) &&
|
||||
verifier.VerifyVector(many_vectors()) &&
|
||||
verifier.VerifyVectorOfTables(many_vectors()) &&
|
||||
verifier.EndTable();
|
||||
}
|
||||
RootTableT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
|
||||
void UnPackTo(RootTableT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
|
||||
static ::flatbuffers::Offset<RootTable> Pack(::flatbuffers::FlatBufferBuilder64 &_fbb, const RootTableT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
|
||||
};
|
||||
|
||||
struct RootTableBuilder {
|
||||
typedef RootTable Table;
|
||||
::flatbuffers::FlatBufferBuilder64 &fbb_;
|
||||
::flatbuffers::uoffset_t start_;
|
||||
void add_far_vector(::flatbuffers::Offset64<::flatbuffers::Vector<uint8_t>> far_vector) {
|
||||
fbb_.AddOffset(RootTable::VT_FAR_VECTOR, far_vector);
|
||||
}
|
||||
void add_a(int32_t a) {
|
||||
fbb_.AddElement<int32_t>(RootTable::VT_A, a, 0);
|
||||
}
|
||||
void add_far_string(::flatbuffers::Offset64<::flatbuffers::String> far_string) {
|
||||
fbb_.AddOffset(RootTable::VT_FAR_STRING, far_string);
|
||||
}
|
||||
void add_big_vector(::flatbuffers::Offset64<::flatbuffers::Vector64<uint8_t>> big_vector) {
|
||||
fbb_.AddOffset(RootTable::VT_BIG_VECTOR, big_vector);
|
||||
}
|
||||
void add_near_string(::flatbuffers::Offset<::flatbuffers::String> near_string) {
|
||||
fbb_.AddOffset(RootTable::VT_NEAR_STRING, near_string);
|
||||
}
|
||||
void add_nested_root(::flatbuffers::Offset64<::flatbuffers::Vector64<uint8_t>> nested_root) {
|
||||
fbb_.AddOffset(RootTable::VT_NESTED_ROOT, nested_root);
|
||||
}
|
||||
void add_far_struct_vector(::flatbuffers::Offset64<::flatbuffers::Vector<const LeafStruct *>> far_struct_vector) {
|
||||
fbb_.AddOffset(RootTable::VT_FAR_STRUCT_VECTOR, far_struct_vector);
|
||||
}
|
||||
void add_big_struct_vector(::flatbuffers::Offset64<::flatbuffers::Vector64<const LeafStruct *>> big_struct_vector) {
|
||||
fbb_.AddOffset(RootTable::VT_BIG_STRUCT_VECTOR, big_struct_vector);
|
||||
}
|
||||
void add_many_vectors(::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset<WrapperTable>>> many_vectors) {
|
||||
fbb_.AddOffset(RootTable::VT_MANY_VECTORS, many_vectors);
|
||||
}
|
||||
explicit RootTableBuilder(::flatbuffers::FlatBufferBuilder64 &_fbb)
|
||||
: fbb_(_fbb) {
|
||||
start_ = fbb_.StartTable();
|
||||
}
|
||||
::flatbuffers::Offset<RootTable> Finish() {
|
||||
const auto end = fbb_.EndTable(start_);
|
||||
auto o = ::flatbuffers::Offset<RootTable>(end);
|
||||
return o;
|
||||
}
|
||||
};
|
||||
|
||||
inline ::flatbuffers::Offset<RootTable> CreateRootTable(
    ::flatbuffers::FlatBufferBuilder64 &_fbb,
    ::flatbuffers::Offset64<::flatbuffers::Vector<uint8_t>> far_vector = 0,
    int32_t a = 0,
    ::flatbuffers::Offset64<::flatbuffers::String> far_string = 0,
    ::flatbuffers::Offset64<::flatbuffers::Vector64<uint8_t>> big_vector = 0,
    ::flatbuffers::Offset<::flatbuffers::String> near_string = 0,
    ::flatbuffers::Offset64<::flatbuffers::Vector64<uint8_t>> nested_root = 0,
    ::flatbuffers::Offset64<::flatbuffers::Vector<const LeafStruct *>> far_struct_vector = 0,
    ::flatbuffers::Offset64<::flatbuffers::Vector64<const LeafStruct *>> big_struct_vector = 0,
    ::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset<WrapperTable>>> many_vectors = 0) {
  RootTableBuilder builder_(_fbb);
  builder_.add_big_struct_vector(big_struct_vector);
  builder_.add_nested_root(nested_root);
  builder_.add_big_vector(big_vector);
  builder_.add_many_vectors(many_vectors);
  builder_.add_far_struct_vector(far_struct_vector);
  builder_.add_near_string(near_string);
  builder_.add_far_string(far_string);
  builder_.add_a(a);
  builder_.add_far_vector(far_vector);
  return builder_.Finish();
}
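// --- Editorial usage sketch (not generated code) ---------------------------
// How the helper above might be driven with FlatBufferBuilder64. Everything
// reached through an Offset64 field is created before any 32-bit offsets,
// matching the order used by CreateRootTableDirect below. The function name
// and payload values are illustrative only.
inline void ExampleCreateRootTable64() {
  ::flatbuffers::FlatBufferBuilder64 fbb;
  const std::vector<uint8_t> payload(1024, 0x5A);  // stand-in data
  // 64-bit addressed pieces first; they end up deepest in the buffer.
  const auto far_vector = fbb.CreateVector64<::flatbuffers::Vector>(payload);
  const auto big_vector = fbb.CreateVector64(payload);
  const auto far_string = fbb.CreateString<::flatbuffers::Offset64>("far string");
  // 32-bit addressed pieces afterwards.
  const auto near_string = fbb.CreateString("near string");
  fbb.Finish(CreateRootTable(fbb, far_vector, /*a=*/42, far_string, big_vector,
                             near_string));
}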
inline ::flatbuffers::Offset<RootTable> CreateRootTableDirect(
    ::flatbuffers::FlatBufferBuilder64 &_fbb,
    const std::vector<uint8_t> *far_vector = nullptr,
    int32_t a = 0,
    const char *far_string = nullptr,
    const std::vector<uint8_t> *big_vector = nullptr,
    const char *near_string = nullptr,
    const std::vector<uint8_t> *nested_root = nullptr,
    const std::vector<LeafStruct> *far_struct_vector = nullptr,
    const std::vector<LeafStruct> *big_struct_vector = nullptr,
    const std::vector<::flatbuffers::Offset<WrapperTable>> *many_vectors = nullptr) {
  auto far_vector__ = far_vector ? _fbb.CreateVector64<::flatbuffers::Vector>(*far_vector) : 0;
  auto far_string__ = far_string ? _fbb.CreateString<::flatbuffers::Offset64>(far_string) : 0;
  auto big_vector__ = big_vector ? _fbb.CreateVector64(*big_vector) : 0;
  auto nested_root__ = nested_root ? _fbb.CreateVector64(*nested_root) : 0;
  auto far_struct_vector__ = far_struct_vector ? _fbb.CreateVectorOfStructs64<::flatbuffers::Vector>(*far_struct_vector) : 0;
  auto big_struct_vector__ = big_struct_vector ? _fbb.CreateVectorOfStructs64(*big_struct_vector) : 0;
  auto near_string__ = near_string ? _fbb.CreateString(near_string) : 0;
  auto many_vectors__ = many_vectors ? _fbb.CreateVector<::flatbuffers::Offset<WrapperTable>>(*many_vectors) : 0;
  return CreateRootTable(
      _fbb,
      far_vector__,
      a,
      far_string__,
      big_vector__,
      near_string__,
      nested_root__,
      far_struct_vector__,
      big_struct_vector__,
      many_vectors__);
}
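// --- Editorial usage sketch (not generated code) ---------------------------
// The same table built through the *Direct overload above, which takes raw
// std::vector / C-string arguments and issues the CreateVector64/CreateString
// calls in the required order. Names and values are illustrative only.
inline void ExampleCreateRootTableDirect64() {
  ::flatbuffers::FlatBufferBuilder64 fbb;
  const std::vector<uint8_t> payload(1024, 0x5A);
  fbb.Finish(CreateRootTableDirect(fbb, &payload, /*a=*/42, "far string",
                                   &payload, "near string"));
}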

::flatbuffers::Offset<RootTable> CreateRootTable(::flatbuffers::FlatBufferBuilder64 &_fbb, const RootTableT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);

inline bool operator==(const WrapperTableT &lhs, const WrapperTableT &rhs) {
|
||||
return
|
||||
(lhs.vector == rhs.vector);
|
||||
}
|
||||
|
||||
inline bool operator!=(const WrapperTableT &lhs, const WrapperTableT &rhs) {
|
||||
return !(lhs == rhs);
|
||||
}
|
||||
|
||||
|
||||
inline WrapperTableT *WrapperTable::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
|
||||
auto _o = std::unique_ptr<WrapperTableT>(new WrapperTableT());
|
||||
UnPackTo(_o.get(), _resolver);
|
||||
return _o.release();
|
||||
}
|
||||
|
||||
inline void WrapperTable::UnPackTo(WrapperTableT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
|
||||
(void)_o;
|
||||
(void)_resolver;
|
||||
{ auto _e = vector(); if (_e) { _o->vector.resize(_e->size()); std::copy(_e->begin(), _e->end(), _o->vector.begin()); } }
|
||||
}
|
||||
|
||||
inline ::flatbuffers::Offset<WrapperTable> WrapperTable::Pack(::flatbuffers::FlatBufferBuilder64 &_fbb, const WrapperTableT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
|
||||
return CreateWrapperTable(_fbb, _o, _rehasher);
|
||||
}
|
||||
|
||||
inline ::flatbuffers::Offset<WrapperTable> CreateWrapperTable(::flatbuffers::FlatBufferBuilder64 &_fbb, const WrapperTableT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
|
||||
(void)_rehasher;
|
||||
(void)_o;
|
||||
struct _VectorArgs { ::flatbuffers::FlatBufferBuilder64 *__fbb; const WrapperTableT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
|
||||
auto _vector = _o->vector.size() ? _fbb.CreateVector64<::flatbuffers::Vector>(_o->vector) : 0;
|
||||
return CreateWrapperTable(
|
||||
_fbb,
|
||||
_vector);
|
||||
}
|
||||
|
||||
|
||||
inline bool operator==(const RootTableT &lhs, const RootTableT &rhs) {
|
||||
return
|
||||
(lhs.far_vector == rhs.far_vector) &&
|
||||
(lhs.a == rhs.a) &&
|
||||
(lhs.far_string == rhs.far_string) &&
|
||||
(lhs.big_vector == rhs.big_vector) &&
|
||||
(lhs.near_string == rhs.near_string) &&
|
||||
(lhs.nested_root == rhs.nested_root) &&
|
||||
(lhs.far_struct_vector == rhs.far_struct_vector) &&
|
||||
(lhs.big_struct_vector == rhs.big_struct_vector) &&
|
||||
(lhs.many_vectors.size() == rhs.many_vectors.size() && std::equal(lhs.many_vectors.cbegin(), lhs.many_vectors.cend(), rhs.many_vectors.cbegin(), [](std::unique_ptr<WrapperTableT> const &a, std::unique_ptr<WrapperTableT> const &b) { return (a == b) || (a && b && *a == *b); }));
|
||||
}
|
||||
|
||||
inline bool operator!=(const RootTableT &lhs, const RootTableT &rhs) {
|
||||
return !(lhs == rhs);
|
||||
}
|
||||
|
||||
|
||||
inline RootTableT::RootTableT(const RootTableT &o)
|
||||
: far_vector(o.far_vector),
|
||||
a(o.a),
|
||||
far_string(o.far_string),
|
||||
big_vector(o.big_vector),
|
||||
near_string(o.near_string),
|
||||
nested_root(o.nested_root),
|
||||
far_struct_vector(o.far_struct_vector),
|
||||
big_struct_vector(o.big_struct_vector) {
|
||||
many_vectors.reserve(o.many_vectors.size());
|
||||
for (const auto &many_vectors_ : o.many_vectors) { many_vectors.emplace_back((many_vectors_) ? new WrapperTableT(*many_vectors_) : nullptr); }
|
||||
}
|
||||
|
||||
inline RootTableT &RootTableT::operator=(RootTableT o) FLATBUFFERS_NOEXCEPT {
|
||||
std::swap(far_vector, o.far_vector);
|
||||
std::swap(a, o.a);
|
||||
std::swap(far_string, o.far_string);
|
||||
std::swap(big_vector, o.big_vector);
|
||||
std::swap(near_string, o.near_string);
|
||||
std::swap(nested_root, o.nested_root);
|
||||
std::swap(far_struct_vector, o.far_struct_vector);
|
||||
std::swap(big_struct_vector, o.big_struct_vector);
|
||||
std::swap(many_vectors, o.many_vectors);
|
||||
return *this;
|
||||
}
|
||||
|
||||
inline RootTableT *RootTable::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
|
||||
auto _o = std::unique_ptr<RootTableT>(new RootTableT());
|
||||
UnPackTo(_o.get(), _resolver);
|
||||
return _o.release();
|
||||
}
|
||||
|
||||
inline void RootTable::UnPackTo(RootTableT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
|
||||
(void)_o;
|
||||
(void)_resolver;
|
||||
{ auto _e = far_vector(); if (_e) { _o->far_vector.resize(_e->size()); std::copy(_e->begin(), _e->end(), _o->far_vector.begin()); } }
|
||||
{ auto _e = a(); _o->a = _e; }
|
||||
{ auto _e = far_string(); if (_e) _o->far_string = _e->str(); }
|
||||
{ auto _e = big_vector(); if (_e) { _o->big_vector.resize(_e->size()); std::copy(_e->begin(), _e->end(), _o->big_vector.begin()); } }
|
||||
{ auto _e = near_string(); if (_e) _o->near_string = _e->str(); }
|
||||
{ auto _e = nested_root(); if (_e) { _o->nested_root.resize(_e->size()); std::copy(_e->begin(), _e->end(), _o->nested_root.begin()); } }
|
||||
{ auto _e = far_struct_vector(); if (_e) { _o->far_struct_vector.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->far_struct_vector[_i] = *_e->Get(_i); } } else { _o->far_struct_vector.resize(0); } }
|
||||
{ auto _e = big_struct_vector(); if (_e) { _o->big_struct_vector.resize(_e->size()); for (::flatbuffers::uoffset64_t _i = 0; _i < _e->size(); _i++) { _o->big_struct_vector[_i] = *_e->Get(_i); } } else { _o->big_struct_vector.resize(0); } }
|
||||
{ auto _e = many_vectors(); if (_e) { _o->many_vectors.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { if(_o->many_vectors[_i]) { _e->Get(_i)->UnPackTo(_o->many_vectors[_i].get(), _resolver); } else { _o->many_vectors[_i] = std::unique_ptr<WrapperTableT>(_e->Get(_i)->UnPack(_resolver)); }; } } else { _o->many_vectors.resize(0); } }
|
||||
}
|
||||
|
||||
inline ::flatbuffers::Offset<RootTable> RootTable::Pack(::flatbuffers::FlatBufferBuilder64 &_fbb, const RootTableT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
|
||||
return CreateRootTable(_fbb, _o, _rehasher);
|
||||
}
|
||||
|
||||
inline ::flatbuffers::Offset<RootTable> CreateRootTable(::flatbuffers::FlatBufferBuilder64 &_fbb, const RootTableT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
|
||||
(void)_rehasher;
|
||||
(void)_o;
|
||||
struct _VectorArgs { ::flatbuffers::FlatBufferBuilder64 *__fbb; const RootTableT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
|
||||
auto _far_vector = _o->far_vector.size() ? _fbb.CreateVector64<::flatbuffers::Vector>(_o->far_vector) : 0;
|
||||
auto _a = _o->a;
|
||||
auto _far_string = _o->far_string.empty() ? 0 : _fbb.CreateString<::flatbuffers::Offset64>(_o->far_string);
|
||||
auto _big_vector = _o->big_vector.size() ? _fbb.CreateVector64(_o->big_vector) : 0;
|
||||
auto _near_string = _o->near_string.empty() ? 0 : _fbb.CreateString(_o->near_string);
|
||||
auto _nested_root = _o->nested_root.size() ? _fbb.CreateVector64(_o->nested_root) : 0;
|
||||
auto _far_struct_vector = _o->far_struct_vector.size() ? _fbb.CreateVectorOfStructs64<::flatbuffers::Vector>(_o->far_struct_vector) : 0;
|
||||
auto _big_struct_vector = _o->big_struct_vector.size() ? _fbb.CreateVectorOfStructs64(_o->big_struct_vector) : 0;
|
||||
auto _many_vectors = _o->many_vectors.size() ? _fbb.CreateVector<::flatbuffers::Offset<WrapperTable>> (_o->many_vectors.size(), [](size_t i, _VectorArgs *__va) { return CreateWrapperTable(*__va->__fbb, __va->__o->many_vectors[i].get(), __va->__rehasher); }, &_va ) : 0;
|
||||
return CreateRootTable(
|
||||
_fbb,
|
||||
_far_vector,
|
||||
_a,
|
||||
_far_string,
|
||||
_big_vector,
|
||||
_near_string,
|
||||
_nested_root,
|
||||
_far_struct_vector,
|
||||
_big_struct_vector,
|
||||
_many_vectors);
|
||||
}
|
||||
|
||||
inline const ::flatbuffers::TypeTable *LeafStructTypeTable() {
|
||||
static const ::flatbuffers::TypeCode type_codes[] = {
|
||||
{ ::flatbuffers::ET_INT, 0, -1 },
|
||||
{ ::flatbuffers::ET_DOUBLE, 0, -1 }
|
||||
};
|
||||
static const int64_t values[] = { 0, 8, 16 };
|
||||
static const char * const names[] = {
|
||||
"a",
|
||||
"b"
|
||||
};
|
||||
static const ::flatbuffers::TypeTable tt = {
|
||||
::flatbuffers::ST_STRUCT, 2, type_codes, nullptr, nullptr, values, names
|
||||
};
|
||||
return &tt;
|
||||
}
|
||||
|
||||
inline const ::flatbuffers::TypeTable *WrapperTableTypeTable() {
|
||||
static const ::flatbuffers::TypeCode type_codes[] = {
|
||||
{ ::flatbuffers::ET_CHAR, 1, -1 }
|
||||
};
|
||||
static const char * const names[] = {
|
||||
"vector"
|
||||
};
|
||||
static const ::flatbuffers::TypeTable tt = {
|
||||
::flatbuffers::ST_TABLE, 1, type_codes, nullptr, nullptr, nullptr, names
|
||||
};
|
||||
return &tt;
|
||||
}
|
||||
|
||||
inline const ::flatbuffers::TypeTable *RootTableTypeTable() {
|
||||
static const ::flatbuffers::TypeCode type_codes[] = {
|
||||
{ ::flatbuffers::ET_UCHAR, 1, -1 },
|
||||
{ ::flatbuffers::ET_INT, 0, -1 },
|
||||
{ ::flatbuffers::ET_STRING, 0, -1 },
|
||||
{ ::flatbuffers::ET_UCHAR, 1, -1 },
|
||||
{ ::flatbuffers::ET_STRING, 0, -1 },
|
||||
{ ::flatbuffers::ET_UCHAR, 1, -1 },
|
||||
{ ::flatbuffers::ET_SEQUENCE, 1, 0 },
|
||||
{ ::flatbuffers::ET_SEQUENCE, 1, 0 },
|
||||
{ ::flatbuffers::ET_SEQUENCE, 1, 1 }
|
||||
};
|
||||
static const ::flatbuffers::TypeFunction type_refs[] = {
|
||||
LeafStructTypeTable,
|
||||
WrapperTableTypeTable
|
||||
};
|
||||
static const char * const names[] = {
|
||||
"far_vector",
|
||||
"a",
|
||||
"far_string",
|
||||
"big_vector",
|
||||
"near_string",
|
||||
"nested_root",
|
||||
"far_struct_vector",
|
||||
"big_struct_vector",
|
||||
"many_vectors"
|
||||
};
|
||||
static const ::flatbuffers::TypeTable tt = {
|
||||
::flatbuffers::ST_TABLE, 9, type_codes, type_refs, nullptr, nullptr, names
|
||||
};
|
||||
return &tt;
|
||||
}
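// --- Editorial usage sketch (not generated code) ---------------------------
// The type tables above drive flatbuffers' mini-reflection. Assuming
// "flatbuffers/minireflect.h" is included, a finished buffer can be printed
// without loading the .bfbs schema. The function name is illustrative only.
inline std::string ExampleDumpRootTableAsText(const uint8_t *buf) {
  return ::flatbuffers::FlatBufferToString(buf, RootTableTypeTable());
}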
|
||||
|
||||
inline const RootTable *GetRootTable(const void *buf) {
|
||||
return ::flatbuffers::GetRoot<RootTable>(buf);
|
||||
}
|
||||
|
||||
inline const RootTable *GetSizePrefixedRootTable(const void *buf) {
|
||||
return ::flatbuffers::GetSizePrefixedRoot<RootTable,::flatbuffers::uoffset64_t>(buf);
|
||||
}
|
||||
|
||||
inline RootTable *GetMutableRootTable(void *buf) {
|
||||
return ::flatbuffers::GetMutableRoot<RootTable>(buf);
|
||||
}
|
||||
|
||||
inline RootTable *GetMutableSizePrefixedRootTable(void *buf) {
|
||||
return ::flatbuffers::GetMutableSizePrefixedRoot<RootTable,::flatbuffers::uoffset64_t>(buf);
|
||||
}
|
||||
|
||||
inline bool VerifyRootTableBuffer(
|
||||
::flatbuffers::Verifier &verifier) {
|
||||
return verifier.VerifyBuffer<RootTable>(nullptr);
|
||||
}
|
||||
|
||||
inline bool VerifySizePrefixedRootTableBuffer(
|
||||
::flatbuffers::Verifier &verifier) {
|
||||
return verifier.VerifySizePrefixedBuffer<RootTable,::flatbuffers::uoffset64_t>(nullptr);
|
||||
}
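// --- Editorial usage sketch (not generated code) ---------------------------
// Verifying an untrusted buffer with the helpers above before touching it;
// the size-prefixed variant checks a 64-bit (uoffset64_t) length prefix.
// The function name is illustrative only.
inline bool ExampleVerifyRootTable(const uint8_t *buf, size_t size,
                                   bool size_prefixed) {
  ::flatbuffers::Verifier verifier(buf, size);
  return size_prefixed ? VerifySizePrefixedRootTableBuffer(verifier)
                       : VerifyRootTableBuffer(verifier);
}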
|
||||
|
||||
inline void FinishRootTableBuffer(
|
||||
::flatbuffers::FlatBufferBuilder64 &fbb,
|
||||
::flatbuffers::Offset<RootTable> root) {
|
||||
fbb.Finish(root);
|
||||
}
|
||||
|
||||
inline void FinishSizePrefixedRootTableBuffer(
|
||||
::flatbuffers::FlatBufferBuilder64 &fbb,
|
||||
::flatbuffers::Offset<RootTable> root) {
|
||||
fbb.FinishSizePrefixed(root);
|
||||
}
|
||||
|
||||
inline std::unique_ptr<RootTableT> UnPackRootTable(
|
||||
const void *buf,
|
||||
const ::flatbuffers::resolver_function_t *res = nullptr) {
|
||||
return std::unique_ptr<RootTableT>(GetRootTable(buf)->UnPack(res));
|
||||
}
|
||||
|
||||
inline std::unique_ptr<RootTableT> UnPackSizePrefixedRootTable(
|
||||
const void *buf,
|
||||
const ::flatbuffers::resolver_function_t *res = nullptr) {
|
||||
return std::unique_ptr<RootTableT>(GetSizePrefixedRootTable(buf)->UnPack(res));
|
||||
}
|
||||
|
||||
#endif // FLATBUFFERS_GENERATED_TEST64BIT_H_
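// --- Editorial usage sketch (not part of the generated header) -------------
// Round trip through the object (native) API declared above: pack a
// RootTableT with FlatBufferBuilder64, then unpack it again. Field values and
// the function name are illustrative only.
inline void ExampleObjectApiRoundTrip() {
  RootTableT obj;
  obj.a = 7;
  obj.big_vector.assign(1 << 20, 0xAB);
  obj.near_string = "hello";

  ::flatbuffers::FlatBufferBuilder64 fbb;
  fbb.Finish(RootTable::Pack(fbb, &obj));

  std::unique_ptr<RootTableT> back = UnPackRootTable(fbb.GetBufferPointer());
  // Expect: back->a == 7, back->big_vector.size() == (1 << 20),
  //         back->near_string == "hello".
  (void)back;
}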
|
@ -20,6 +20,12 @@ cc_test(
|
||||
name = "flatbuffers_test",
|
||||
testonly = 1,
|
||||
srcs = [
|
||||
"64bit/evolution/v1_generated.h",
|
||||
"64bit/evolution/v2_generated.h",
|
||||
"64bit/offset64_test.cpp",
|
||||
"64bit/offset64_test.h",
|
||||
"64bit/test_64bit_bfbs_generated.h",
|
||||
"64bit/test_64bit_generated.h",
|
||||
"alignment_test.cpp",
|
||||
"alignment_test.h",
|
||||
"alignment_test_generated.h",
|
||||
|
@ -910,7 +910,7 @@ class Monster extends Table
|
||||
|
||||
/**
|
||||
* @param FlatBufferBuilder $builder
|
||||
* @param int
|
||||
* @param VectorOffset
|
||||
* @return void
|
||||
*/
|
||||
public static function addPos(FlatBufferBuilder $builder, $pos)
|
||||
@ -1111,7 +1111,7 @@ class Monster extends Table
|
||||
|
||||
/**
|
||||
* @param FlatBufferBuilder $builder
|
||||
* @param int
|
||||
* @param VectorOffset
|
||||
* @return void
|
||||
*/
|
||||
public static function addEnemy(FlatBufferBuilder $builder, $enemy)
|
||||
@ -1155,7 +1155,7 @@ class Monster extends Table
|
||||
|
||||
/**
|
||||
* @param FlatBufferBuilder $builder
|
||||
* @param int
|
||||
* @param VectorOffset
|
||||
* @return void
|
||||
*/
|
||||
public static function addTestempty(FlatBufferBuilder $builder, $testempty)
|
||||
@ -1523,7 +1523,7 @@ class Monster extends Table
|
||||
|
||||
/**
|
||||
* @param FlatBufferBuilder $builder
|
||||
* @param int
|
||||
* @param VectorOffset
|
||||
* @return void
|
||||
*/
|
||||
public static function addParentNamespaceTest(FlatBufferBuilder $builder, $parentNamespaceTest)
|
||||
@ -1875,7 +1875,7 @@ class Monster extends Table
|
||||
|
||||
/**
|
||||
* @param FlatBufferBuilder $builder
|
||||
* @param int
|
||||
* @param VectorOffset
|
||||
* @return void
|
||||
*/
|
||||
public static function addNativeInline(FlatBufferBuilder $builder, $nativeInline)
|
||||
|
@ -80,7 +80,6 @@ void EvolutionTest(const std::string &tests_data_path) {
#endif
}

void ConformTest() {
  const char ref[] = "table T { A:int; } enum E:byte { A }";

@ -111,9 +110,41 @@ void ConformTest() {
  const char ref2[] = "enum E:byte { A } table T2 { f:E; } ";
  test_conform(ref2, "enum E:int32 { A } table T2 { df:byte; f:E; }",
               "field renamed to different type: T2.df (renamed from T2.f)");

  // Check conformity for Offset64-related changes.
  {
    const char ref[] = "table T { a:[uint8]; b:string; }";

    // Adding a 'vector64' changes the type.
    test_conform(ref, "table T { a:[uint8] (vector64); b:string; }",
                 "types differ for field: T.a");

    // Adding an 'offset64' to the vector changes the offset type.
    test_conform(ref, "table T { a:[uint8] (offset64); b:string; }",
                 "offset types differ for field: T.a");

    // Adding an 'offset64' to the string also changes the offset type.
    test_conform(ref, "table T { a:[uint8]; b:string (offset64); }",
                 "offset types differ for field: T.b");

    // Now try the opposite direction: removing an attribute from an existing
    // field.

    // Removing a 'vector64' changes the type.
    test_conform("table T { a:[uint8] (vector64); b:string; }", ref,
                 "types differ for field: T.a");

    // Removing an 'offset64' from the vector also changes the offset type.
    test_conform("table T { a:[uint8] (offset64); b:string; }", ref,
                 "offset types differ for field: T.a");

    // Removing an 'offset64' from the string also changes the offset type.
    test_conform("table T { a:[uint8]; b:string (offset64); }", ref,
                 "offset types differ for field: T.b");
  }
}
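// Editorial note (not part of the test suite): the checks above encode the
// rule that changing the offset width of an *existing* field is not a
// conforming change, since old readers would look for its data through the
// wrong offset type. Evolving a schema toward 64-bit therefore means adding a
// new field instead, e.g. (illustrative schemas):
//
//   v1: table T { a:[uint8]; b:string; }
//   v2: table T { a:[uint8]; b:string; big:[uint8] (vector64); }
//
// which is the pattern exercised by the tests under tests/64bit/evolution/.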
void UnionDeprecationTest(const std::string& tests_data_path) {
|
||||
void UnionDeprecationTest(const std::string &tests_data_path) {
|
||||
const int NUM_VERSIONS = 2;
|
||||
std::string schemas[NUM_VERSIONS];
|
||||
std::string jsonfiles[NUM_VERSIONS];
|
||||
|
tests/fuzzer/.gitignore
@ -15,4 +15,6 @@ fuzz-*.log
|
||||
annotated_binary.bfbs
|
||||
annotated_binary.bin
|
||||
|
||||
test_64bit.bin
|
||||
|
||||
monster_test.bfbs
|
@ -117,6 +117,7 @@ set(FlatBuffers_Library_SRCS
|
||||
${FLATBUFFERS_DIR}/src/binary_annotator.cpp
|
||||
${FLATBUFFERS_DIR}/src/util.cpp
|
||||
${FLATBUFFERS_DIR}/tests/test_assert.cpp
|
||||
${FLATBUFFERS_DIR}/tests/64bit/test_64bit_bfbs_generated.h
|
||||
)
|
||||
|
||||
include_directories(${FLATBUFFERS_DIR}/include)
|
||||
@ -175,6 +176,16 @@ add_custom_command(
|
||||
${CMAKE_CURRENT_BINARY_DIR}/seed_annotator/annotated_binary.bin
|
||||
)
|
||||
|
||||
add_executable(64bit_fuzzer flatbuffers_64bit_fuzzer.cc)
|
||||
target_link_libraries(64bit_fuzzer PRIVATE flatbuffers_fuzzed)
|
||||
add_custom_command(
|
||||
TARGET 64bit_fuzzer PRE_BUILD
|
||||
|
||||
COMMAND ${CMAKE_COMMAND} -E copy
|
||||
${CMAKE_SOURCE_DIR}/../64bit/test_64bit.bin
|
||||
${CMAKE_CURRENT_BINARY_DIR}/seed_64bit/test_64bit.bin
|
||||
)
|
||||
|
||||
# Build debugger for weird cases found with fuzzer.
|
||||
if(BUILD_DEBUGGER)
|
||||
add_library(flatbuffers_nonfuzz STATIC ${FlatBuffers_Library_SRCS})
|
||||
|
tests/fuzzer/flatbuffers_64bit_fuzzer.cc (new file)
@ -0,0 +1,121 @@
#include <cstdint>
#include <filesystem>
#include <type_traits>

#include "64bit/test_64bit_bfbs_generated.h"
#include "64bit/test_64bit_generated.h"
#include "flatbuffers/base.h"
#include "flatbuffers/flatbuffer_builder.h"
#include "flatbuffers/flatbuffers.h"
#include "flatbuffers/reflection.h"
#include "flatbuffers/verifier.h"
#include "test_assert.h"
#include "test_init.h"

OneTimeTestInit OneTimeTestInit::one_time_init_;

static RootTableBinarySchema schema;

static constexpr uint8_t flags_sized_prefixed = 0b00000001;

static const uint64_t kFnvPrime = 0x00000100000001b3ULL;
static const uint64_t kOffsetBasis = 0xcbf29ce484222645ULL;

namespace flatbuffers {

template<typename T, typename = std::enable_if_t<std::is_integral_v<T>>>
uint64_t Hash(T value, uint64_t hash) {
  return (hash * kFnvPrime) ^ value;
}

uint64_t Hash(double value, uint64_t hash) {
  static_assert(sizeof(double) == sizeof(uint64_t));
  return (hash * kFnvPrime) ^ static_cast<uint64_t>(value);
}

uint64_t Hash(const flatbuffers::String *value, uint64_t hash) {
  if (value == nullptr) { return hash * kFnvPrime; }
  for (auto &c : value->str()) { hash = Hash(static_cast<uint8_t>(c), hash); }
  return hash;
}

uint64_t Hash(const LeafStruct *value, uint64_t hash) {
  if (value == nullptr) { return hash * kFnvPrime; }
  hash = Hash(value->a(), hash);
  hash = Hash(value->b(), hash);
  return hash;
}

template<typename T> uint64_t Hash(const Vector<T> *value, uint64_t hash) {
  if (value == nullptr) { return hash * kFnvPrime; }
  for (const T c : *value) { hash = Hash(c, hash); }
  return hash;
}

template<typename T> uint64_t Hash(const Vector64<T> *value, uint64_t hash) {
  if (value == nullptr) { return hash * kFnvPrime; }
  for (const T c : *value) { hash = Hash(c, hash); }
  return hash;
}

uint64_t Hash(const RootTable *value, uint64_t hash) {
  if (value == nullptr) { return hash * kFnvPrime; }
  // Hash all the fields so we can exercise all parts of the code.
  hash = Hash(value->far_vector(), hash);
  hash = Hash(value->a(), hash);
  hash = Hash(value->far_string(), hash);
  hash = Hash(value->big_vector(), hash);
  hash = Hash(value->near_string(), hash);
  hash = Hash(value->nested_root(), hash);
  hash = Hash(value->far_struct_vector(), hash);
  hash = Hash(value->big_struct_vector(), hash);
  return hash;
}

static int AccessBuffer(const uint8_t *data, size_t size,
                        bool is_size_prefixed) {
  const RootTable *root_table =
      is_size_prefixed ? GetSizePrefixedRootTable(data) : GetRootTable(data);
  TEST_NOTNULL(root_table);

  uint64_t hash = kOffsetBasis;
  hash = Hash(root_table, hash);
  hash = Hash(root_table->nested_root_nested_root(), hash);

  return 0;
}

extern "C" int LLVMFuzzerInitialize(int *, char ***argv) {
  Verifier verifier(schema.begin(), schema.size());
  TEST_EQ(true, reflection::VerifySchemaBuffer(verifier));

  return 0;
}

extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
  if (size < FLATBUFFERS_MIN_BUFFER_SIZE) { return 0; }

  // Take the first bit of data as a flag to control things.
  const uint8_t flags = data[0];
  data++;
  size--;

  Verifier::Options options;
  options.assert = true;
  options.check_alignment = true;
  options.check_nested_flatbuffers = true;

  Verifier verifier(data, size, options);

  const bool is_size_prefixed = flags & flags_sized_prefixed;

  // Filter out data that isn't valid.
  if ((is_size_prefixed && !VerifySizePrefixedRootTableBuffer(verifier)) ||
      !VerifyRootTableBuffer(verifier)) {
    return 0;
  }

  return AccessBuffer(data, size, is_size_prefixed);
}

} // namespace flatbuffers
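// Editorial sketch (not part of the fuzzer): the Hash/AccessBuffer walk above
// can double as a quick smoke test over a freshly built buffer when called
// from within this translation unit (AccessBuffer has internal linkage).
// Values are illustrative only.
//
//   flatbuffers::FlatBufferBuilder64 fbb;
//   const std::vector<uint8_t> payload(64, 1);
//   fbb.Finish(CreateRootTableDirect(fbb, &payload, /*a=*/1));
//   flatbuffers::AccessBuffer(fbb.GetBufferPointer(), fbb.GetSize(), false);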
@ -46,7 +46,7 @@ extern "C" int LLVMFuzzerInitialize(int *, char ***argv) {
|
||||
|
||||
extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
|
||||
flatbuffers::BinaryAnnotator annotator(schema_bfbs_, schema_bfbs_length_,
|
||||
data, size);
|
||||
data, size, false);
|
||||
|
||||
annotator.Annotate();
|
||||
return 0;
|
||||
|
@ -16,8 +16,9 @@ static const auto infinity_f = std::numeric_limits<float>::infinity();
|
||||
static const auto infinity_d = std::numeric_limits<double>::infinity();
|
||||
|
||||
// Test that parser errors are actually generated.
|
||||
static void TestError_(const char *src, const char *error_substr, bool strict_json,
|
||||
const char *file, int line, const char *func) {
|
||||
static void TestError_(const char *src, const char *error_substr,
|
||||
bool strict_json, const char *file, int line,
|
||||
const char *func) {
|
||||
flatbuffers::IDLOptions opts;
|
||||
opts.strict_json = strict_json;
|
||||
flatbuffers::Parser parser(opts);
|
||||
@ -32,8 +33,8 @@ static void TestError_(const char *src, const char *error_substr, bool strict_js
|
||||
}
|
||||
}
|
||||
|
||||
static void TestError_(const char *src, const char *error_substr, const char *file,
|
||||
int line, const char *func) {
|
||||
static void TestError_(const char *src, const char *error_substr,
|
||||
const char *file, int line, const char *func) {
|
||||
TestError_(src, error_substr, false, file, line, func);
|
||||
}
|
||||
|
||||
@ -47,7 +48,7 @@ static void TestError_(const char *src, const char *error_substr, const char *fi
|
||||
|
||||
static bool FloatCompare(float a, float b) { return fabs(a - b) < 0.001; }
|
||||
|
||||
} // namespace
|
||||
} // namespace
|
||||
|
||||
// Test that parsing errors occur as we'd expect.
|
||||
// Also useful for coverage, making sure these paths are run.
|
||||
@ -124,6 +125,34 @@ void ErrorTest() {
  // An identifier can't start with a sign (+|-).
  TestError("table X { -Y: int; } root_type Y: {Y:1.0}", "identifier");
  TestError("table X { +Y: int; } root_type Y: {Y:1.0}", "identifier");

  // Offset64
  TestError("table X { a:int (vector64); }", "`vector64` attribute");
  TestError("table X { a:int (offset64); }", "`offset64` attribute");
  TestError("table X { a:string (vector64); }", "`vector64` attribute");
  TestError("table y { a:int; } table X { a:y (offset64); }",
            "`offset64` attribute");
  TestError("struct y { a:int; } table X { a:y (offset64); }",
            "`offset64` attribute");
  TestError("table y { a:int; } table X { a:y (vector64); }",
            "`vector64` attribute");
  TestError("union Y { } table X { ys:Y (offset64); }", "`offset64` attribute");

  TestError("table Y { a:int; } table X { ys:[Y] (offset64); }",
            "only vectors of scalars are allowed to be 64-bit.");
  TestError("table Y { a:int; } table X { ys:[Y] (vector64); }",
            "only vectors of scalars are allowed to be 64-bit.");
  TestError("union Y { } table X { ys:[Y] (vector64); }",
            "only vectors of scalars are allowed to be 64-bit.");

  // TODO(derekbailey): the following three could be allowed once the code gen
  // supports the output.
  TestError("table X { y:[string] (offset64); }",
            "only vectors of scalars are allowed to be 64-bit.");
  TestError("table X { y:[string] (vector64); }",
            "only vectors of scalars are allowed to be 64-bit.");
  TestError("enum X:byte {Z} table X { y:[X] (offset64); }",
            "only vectors of scalars are allowed to be 64-bit.");
}
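// Editorial sketch (not part of the test suite): the positive counterpart to
// the error cases above; vectors of scalars and strings do accept the 64-bit
// attributes, mirroring the shape of tests/64bit/test_64bit.fbs. The function
// name is illustrative only.
static void Offset64AttributesAcceptedTest() {
  flatbuffers::Parser parser;
  TEST_EQ(true, parser.Parse("table T { big:[uint8] (vector64); "
                             "far:[uint8] (offset64); s:string (offset64); } "
                             "root_type T;"));
}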
void EnumOutOfRangeTest() {
|
||||
@ -776,8 +805,6 @@ void UnicodeSurrogatesTest() {
|
||||
TEST_EQ_STR(string->c_str(), "\xF0\x9F\x92\xA9");
|
||||
}
|
||||
|
||||
|
||||
|
||||
void UnknownFieldsTest() {
|
||||
flatbuffers::IDLOptions opts;
|
||||
opts.skip_unexpected_fields_in_json = true;
|
||||
|
@ -42,7 +42,7 @@
|
||||
#if !defined(_MSC_VER) || _MSC_VER >= 1700
|
||||
# include "arrays_test_generated.h"
|
||||
#endif
|
||||
|
||||
#include "64bit/offset64_test.h"
|
||||
#include "flexbuffers_test.h"
|
||||
#include "is_quiet_nan.h"
|
||||
#include "monster_test_bfbs_generated.h" // Generated using --bfbs-comments --bfbs-builtins --cpp --bfbs-gen-embed
|
||||
@ -74,7 +74,7 @@ static_assert(flatbuffers::is_same<uint8_t, char>::value ||
|
||||
using namespace MyGame::Example;
|
||||
|
||||
void TriviallyCopyableTest() {
|
||||
// clang-format off
|
||||
// clang-format off
|
||||
#if __GNUG__ && __GNUC__ < 5 && \
|
||||
!(defined(__clang__) && __clang_major__ >= 16)
|
||||
TEST_EQ(__has_trivial_copy(Vec3), true);
|
||||
@ -1540,6 +1540,17 @@ void DoNotRequireEofTest(const std::string &tests_data_path) {
}
#endif

static void Offset64Tests() {
  Offset64Test();
  Offset64SerializedFirst();
  Offset64NestedFlatBuffer();
  Offset64CreateDirect();
  Offset64Evolution();
  Offset64VectorOfStructs();
  Offset64SizePrefix();
  Offset64ManyVectors();
}

int FlatBufferTests(const std::string &tests_data_path) {
  // Run our various test suites:

@ -1651,6 +1662,7 @@ int FlatBufferTests(const std::string &tests_data_path) {
  NestedStructKeyInStructTest();
  FixedSizedStructArrayKeyInStructTest();
  EmbeddedSchemaAccess();
  Offset64Tests();
  return 0;
}
} // namespace
|
||||
|
@ -1,20 +1,22 @@
|
||||
#include "test_builder.h"
|
||||
|
||||
#include "flatbuffers/flatbuffer_builder.h"
|
||||
#include "flatbuffers/stl_emulation.h"
|
||||
#include "monster_test_generated.h"
|
||||
|
||||
using namespace MyGame::Example;
|
||||
using namespace flatbuffers;
|
||||
|
||||
struct OwnedAllocator : public flatbuffers::DefaultAllocator {};
|
||||
struct OwnedAllocator : public DefaultAllocator {};
|
||||
|
||||
class TestHeapBuilder : public flatbuffers::FlatBufferBuilder {
|
||||
class TestHeapBuilder : public FlatBufferBuilder {
|
||||
private:
|
||||
TestHeapBuilder(const TestHeapBuilder &);
|
||||
TestHeapBuilder &operator=(const TestHeapBuilder &);
|
||||
|
||||
public:
|
||||
TestHeapBuilder()
|
||||
: flatbuffers::FlatBufferBuilder(2048, new OwnedAllocator(), true) {}
|
||||
: FlatBufferBuilder(2048, new OwnedAllocator(), true) {}
|
||||
|
||||
TestHeapBuilder(TestHeapBuilder &&other)
|
||||
: FlatBufferBuilder(std::move(other)) {}
|
||||
@ -31,14 +33,14 @@ struct AllocatorMember {
|
||||
};
|
||||
|
||||
struct GrpcLikeMessageBuilder : private AllocatorMember,
|
||||
public flatbuffers::FlatBufferBuilder {
|
||||
public FlatBufferBuilder {
|
||||
private:
|
||||
GrpcLikeMessageBuilder(const GrpcLikeMessageBuilder &);
|
||||
GrpcLikeMessageBuilder &operator=(const GrpcLikeMessageBuilder &);
|
||||
|
||||
public:
|
||||
GrpcLikeMessageBuilder()
|
||||
: flatbuffers::FlatBufferBuilder(1024, &member_allocator_, false) {}
|
||||
: FlatBufferBuilder(1024, &member_allocator_, false) {}
|
||||
|
||||
GrpcLikeMessageBuilder(GrpcLikeMessageBuilder &&other)
|
||||
: FlatBufferBuilder(1024, &member_allocator_, false) {
|
||||
|
@ -17,10 +17,6 @@ class MessageBuilder;
|
||||
}
|
||||
} // namespace flatbuffers
|
||||
|
||||
template<class T, class U> struct is_same { static const bool value = false; };
|
||||
|
||||
template<class T> struct is_same<T, T> { static const bool value = true; };
|
||||
|
||||
inline std::string m1_name() { return "Cyberdemon"; }
|
||||
inline std::string m2_name() { return "Imp"; }
|
||||
inline MyGame::Example::Color m1_color() {
|
||||
@ -166,7 +162,7 @@ struct BuilderTests {
|
||||
}
|
||||
|
||||
static void builder_swap_before_finish_test(
|
||||
bool run = is_same<DestBuilder, SrcBuilder>::value) {
|
||||
bool run = std::is_same<DestBuilder, SrcBuilder>::value) {
|
||||
/// Swap is allowed only when lhs and rhs are the same concrete type.
|
||||
if (run) {
|
||||
SrcBuilder src;
|
||||
@ -186,7 +182,7 @@ struct BuilderTests {
|
||||
}
|
||||
|
||||
static void builder_swap_after_finish_test(
|
||||
bool run = is_same<DestBuilder, SrcBuilder>::value) {
|
||||
bool run = std::is_same<DestBuilder, SrcBuilder>::value) {
|
||||
/// Swap is allowed only when lhs and rhs are the same concrete type.
|
||||
if (run) {
|
||||
SrcBuilder src;