Generator(".", "Database_Writable.h", options)
#include "joedb/journal/Writable_Journal.h"
#include "joedb/journal/Memory_File.h"
#include "joedb/error/Out_Of_Date.h"

 extern const char * schema_string;
 inline constexpr size_t schema_string_size = )RRR";
for (const auto &[tid, tname]: tables)
 out << " class container_of_" << tname << ";\n";

class Database_Writable: public Database, public joedb::Writable
 joedb::index_t max_record_id;
 Table_Id current_table_id = Table_Id{0};
out << " void delete_from(Table_Id table_id, Record_Id record_id) override\n";
for (const auto &[tid, tname]: tables)
 out << "if (table_id == Table_Id(" << tid << "))\n";
 out << " internal_delete_" << tname << "(record_id);\n";
out << " void insert_into(Table_Id table_id, Record_Id record_id) override\n";
out << " if (to_underlying(record_id) < 0 || (max_record_id && to_underlying(record_id) >= max_record_id))\n";
out << " throw_exception(\"insert_into: too big\");\n";
for (const auto &[tid, tname]: tables)
 out << "if (table_id == Table_Id(" << tid << "))\n";
 out << " if (is_valid_record_id_for_" << tname << "(record_id))\n";
 out << " throw_exception(\"Duplicate insert into table " << tname << "\");\n";
 out << " if (storage_of_" << tname << ".size() <= size_t(record_id))\n";
 out << " storage_of_" << tname << ".resize(to_underlying(record_id) + 1);\n";
 out << " internal_insert_" << tname << "(record_id);\n";
joedb::Freedom_Keeper *fk = nullptr;
for (const auto &[tid, tname]: tables)
 out << "if (table_id == Table_Id(" << tid << "))\n";
 out << " fk = &storage_of_" << tname << ".freedom_keeper;\n";
JOEDB_RELEASE_ASSERT(fk->is_used_vector(record_id, size));
joedb::Writable::delete_vector(table_id, record_id, size);
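For the same hypothetical person table, the emitted delete_vector resolves the table's Freedom_Keeper, asserts that the whole range is in use, and then delegates to the base class. The signature is inferred from the joedb::Writable::delete_vector call and is therefore an assumption:

// Approximate generated output; "person" and the signature are assumptions.
void delete_vector(Table_Id table_id, Record_Id record_id, size_t size) override
{
 joedb::Freedom_Keeper *fk = nullptr;
 if (table_id == Table_Id(1))
  fk = &storage_of_person.freedom_keeper;
 JOEDB_RELEASE_ASSERT(fk->is_used_vector(record_id, size));
 joedb::Writable::delete_vector(table_id, record_id, size);
}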
to_underlying(record_id) < 0 ||
(max_record_id && (to_underlying(record_id) >= max_record_id || joedb::index_t(size) >= max_record_id))
 throw_exception("insert_vector: null record_id, or too big");

for (const auto &[tid, tname]: tables)
 out << "if (table_id == Table_Id(" << tid << "))\n";
 out << " if (storage_of_" << tname << ".size() < size_t(record_id) + size)\n";
 out << " storage_of_" << tname << ".resize(to_underlying(record_id) + size);\n";
 out << " internal_vector_insert_" << tname << "(record_id, size);\n";
// Collect the column types actually used in the schema, so that the
// type-specific overrides below are generated only for those types.
std::set<Type::Type_Id> db_types;
for (const auto &[tid, tname]: tables)
 for (const auto &[fid, fname]: db.get_fields(tid))
for (int type_index = 1; type_index < int(Type::type_ids); type_index++)
 if (db_types.find(type_id) == db_types.end())
out << " Table_Id table_id,\n";
out << " Record_Id record_id,\n";
out << " Field_Id field_id,\n";
out << " override\n";
for (const auto &[tid, tname]: tables)
 bool has_typed_field = false;
 for (const auto &[fid, fname]: db.get_fields(tid))
  has_typed_field = true;
 out << " if (table_id == Table_Id(" << tid << "))\n";
 for (const auto &[fid, fname]: db.get_fields(tid))
  out << " if (field_id == Field_Id(" << fid << "))\n";
  out << " internal_update_" << tname;
  out << "__" << fname << "(record_id, ";
  if (type.get_type_id() != Type::Type_Id::reference)
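This per-type loop is instantiated once for every column type found in the schema. For a hypothetical int64 field person.age (Field_Id 1), the generated override would look roughly like this; the method name, the typed value parameter, and the braces come from elided lines and are assumptions:

// Approximate generated output for a hypothetical int64 field person.age.
void update_int64
(
 Table_Id table_id,
 Record_Id record_id,
 Field_Id field_id,
 int64_t value // assumption: the typed value parameter is emitted by elided lines
)
override
{
 if (table_id == Table_Id(1))
 {
  if (field_id == Field_Id(1))
   internal_update_person__age(record_id, value);
 }
}

The trailing test on Type::Type_Id::reference above suggests that reference-typed columns forward their value differently; that branch is elided here.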
for (int type_index = 1; type_index < int(Type::type_ids); type_index++)
 out << " Table_Id table_id,\n";
 out << " Record_Id record_id,\n";
 out << " Field_Id field_id,\n";
 out << " size_t size,\n";
 out << " override\n";
for (const auto &[tid, tname]: tables)
 bool has_typed_field = false;
 for (const auto &[fid, fname]: db.get_fields(tid))
  has_typed_field = true;
 out << " if (table_id == Table_Id(" << tid << "))\n";
 for (const auto &[fid, fname]: db.get_fields(tid))
  out << " if (field_id == Field_Id(" << fid << "))\n";
  out << " internal_update_vector_" << tname;
  out << "__" << fname << "(record_id, size, ";
  if (type_id != joedb::Type::Type_Id::reference)
   out << "reinterpret_cast<const ";
for (int type_index = 1; type_index < int(Type::type_ids); type_index++)
 out << " Table_Id table_id,\n";
 out << " Record_Id record_id,\n";
 out << " Field_Id field_id,\n";
 out << " size_t &capacity\n";
 out << " override\n";
for (const auto &[tid, tname]: tables)
 bool has_typed_field = false;
 for (const auto &[fid, fname]: db.get_fields(tid))
  has_typed_field = true;
 out << " if (table_id == Table_Id(" << tid << "))\n";
 out << " capacity = size_t(storage_of_" << tname << ".freedom_keeper.size());\n";
 for (const auto &[fid, fname]: db.get_fields(tid))
  out << " if (field_id == Field_Id(" << fid << "))\n"
  if (type_id == Type::Type_Id::reference)
   out << "reinterpret_cast<Record_Id *>";
  out << "(storage_of_" << tname;
  out << ".field_value_of_" << fname << ".data() + to_underlying(record_id));\n"
 out << " return nullptr;\n";
out << " return nullptr;\n";
void comment(const std::string &comment) override {}
void timestamp(int64_t timestamp) override {}
void valid_data() override {}

bool upgrading_schema = false;
joedb::Memory_File schema_file;
joedb::Writable_Journal schema_journal;

bool requires_schema_upgrade() const
 return schema_file.get_data().size() < detail::schema_string_size;
constexpr size_t pos = joedb::Header::size;
const size_t schema_file_size = schema_file.get_data().size();
 schema_file_size < pos ||
 schema_file_size > detail::schema_string_size ||
  schema_file.get_data().data() + pos,
  detail::schema_string + pos,
  schema_file_size - pos
 throw_exception("Trying to open a file with incompatible schema");
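These fragments compare the schema journal accumulated during replay against the compiled-in schema_string, skipping the joedb file header. A plausible assembly of the check is sketched below; the std::memcmp call (and the <cstring> include it would need) is an assumption, since the comparison function itself sits in elided lines. Read together with requires_schema_upgrade above, the logic is: a file whose schema diverges from, or extends beyond, the compiled-in schema is rejected, while a file whose schema is a strict prefix of it is considered upgradable.

// Assembled sketch of the compatibility check (std::memcmp assumed).
constexpr size_t pos = joedb::Header::size;
const size_t schema_file_size = schema_file.get_data().size();
if
(
 schema_file_size < pos ||
 schema_file_size > detail::schema_string_size ||
 std::memcmp
 (
  schema_file.get_data().data() + pos,
  detail::schema_string + pos,
  schema_file_size - pos
 ) != 0
)
{
 throw_exception("Trying to open a file with incompatible schema");
}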
void create_table(const std::string &name) override
 schema_journal.create_table(name);
 schema_journal.soft_checkpoint();

void drop_table(Table_Id table_id) override
 schema_journal.drop_table(table_id);
 schema_journal.soft_checkpoint();

 const std::string &name
 schema_journal.rename_table(table_id, name);
 schema_journal.soft_checkpoint();

 const std::string &name,
 schema_journal.add_field(table_id, name, type);
 schema_journal.soft_checkpoint();

void drop_field(Table_Id table_id, Field_Id field_id) override
 schema_journal.drop_field(table_id, field_id);
 schema_journal.soft_checkpoint();

 const std::string &name
 schema_journal.rename_field(table_id, field_id, name);
 schema_journal.soft_checkpoint();

void custom(const std::string &name) override
 schema_journal.custom(name);
 schema_journal.soft_checkpoint();
schema_journal(schema_file)

void set_max_record_id(joedb::index_t record_id)
 max_record_id = record_id;

int64_t get_schema_checkpoint() const
 return schema_journal.get_checkpoint();

void initialize_with_readonly_journal(joedb::Readonly_Journal &journal)
 max_record_id = size_t(journal.get_checkpoint());
 journal.replay_log(*this);

if (requires_schema_upgrade())
 throw_exception<joedb::Out_Of_Date>("Schema is out of date. Can't upgrade a read-only database.");
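For orientation, here is a minimal, self-contained sketch of the journal machinery this class plugs into, using only joedb names that already appear in the listing (Memory_File, Writable_Journal, Readonly_Journal, create_table, soft_checkpoint, get_checkpoint, replay_log). The Readonly_Journal.h include path and the "person" table are assumptions, and the replay target is left as a comment because instantiating the generated class is outside the scope of this file:

// Hypothetical sketch: write one schema event to a memory-backed journal,
// reopen it read-only, and replay it into a joedb::Writable such as the
// generated Database_Writable.
#include "joedb/journal/Memory_File.h"
#include "joedb/journal/Writable_Journal.h"
#include "joedb/journal/Readonly_Journal.h" // assumed path

#include <iostream>

int main()
{
 joedb::Memory_File file;

 {
  joedb::Writable_Journal journal(file);
  journal.create_table("person"); // hypothetical table
  journal.soft_checkpoint();
 }

 joedb::Readonly_Journal journal(file);
 std::cout << "checkpoint: " << journal.get_checkpoint() << '\n';
 // journal.replay_log(writable); // 'writable' would be an instance of the
 //                               // generated Database_Writable (or any joedb::Writable)
 return 0;
}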
Related declarations listed by the documentation for this file:

const std::vector<std::string> &get_name_space() const
const Database &get_db() const
const std::map<Table_Id, std::string> &get_tables() const override
const Type &get_field_type(Table_Id table_id, Field_Id field_id) const override
const std::map<Field_Id, std::string> &get_fields(Table_Id table_id) const override
int64_t get_size() const override // Get the size of the file, or -1 if it is unknown.
const std::string &get_table_name(Table_Id table_id) const
Table_Id get_table_id() const
Type_Id get_type_id() const
Database_Writable_h(const Compiler_Options &options)
static const char *get_storage_type_string(Type type)
static const char *get_type_string(Type type)
void write_type(Type type, bool return_type, bool setter_type)
static const char *get_cpp_type_string(Type type)
const Compiler_Options &options
void namespace_open(std::ostream &out, const std::vector<std::string> &n)
void namespace_close(std::ostream &out, const std::vector<std::string> &n)
void namespace_include_guard(std::ostream &out, const char *name, const std::vector<std::string> &n)
Generator: one code generator for each of the files generated by joedbc.