author     Boris Kolpackov <boris@codesynthesis.com>  2012-10-29 10:03:36 +0200
committer  Boris Kolpackov <boris@codesynthesis.com>  2012-10-29 10:03:36 +0200
commit     f8677f8bfaa68e6714f4af7b030d0f365e60b918 (patch)
tree       5f64e69bd7261348d966b80d0433f6bd34be76f2
parent     938d136b09b868f633574c722d48c2953dde1be4 (diff)
Add support for database prefixes in command line interface
-rw-r--r--  odb/context.cxx             18
-rw-r--r--  odb/generator.cxx          268
-rw-r--r--  odb/include.cxx              9
-rw-r--r--  odb/odb.cxx                110
-rw-r--r--  odb/option-functions.cxx    11
-rw-r--r--  odb/option-parsers.hxx     201
-rw-r--r--  odb/options.cli             62
-rw-r--r--  odb/relational/schema.hxx    2
-rw-r--r--  odb/validator.cxx            2
9 files changed, 450 insertions, 233 deletions
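
The patch below lets most text, file, and schema-related options take an
optional database prefix, for example --schema pgsql:accounting or
--table-prefix mysql:audit_; a value without a prefix applies to every
database. The following standalone sketch is illustrative only (the real
parser in odb/option-parsers.hxx works on odb's database type and the CLI
scanner); the helper name split_db_prefix and the hard-coded database list
are assumptions made for this example.

  // Split "pgsql:accounting" into {"pgsql", "accounting"}. A value whose
  // prefix is not a known database name is treated as having no prefix and
  // is returned whole with an empty key.
  #include <iostream>
  #include <set>
  #include <string>
  #include <utility>

  std::pair<std::string, std::string>
  split_db_prefix (const std::string& v)
  {
    static const std::set<std::string> dbs {
      "common", "mssql", "mysql", "oracle", "pgsql", "sqlite"};

    std::string::size_type p (v.find (':'));

    if (p != std::string::npos && dbs.count (v.substr (0, p)) != 0)
      return std::make_pair (v.substr (0, p), v.substr (p + 1));

    return std::make_pair (std::string (), v); // No prefix: applies to all.
  }

  int main ()
  {
    std::pair<std::string, std::string> r1 (split_db_prefix ("pgsql:accounting"));
    std::pair<std::string, std::string> r2 (split_db_prefix ("audit_"));

    std::cout << r1.first << " -> " << r1.second << '\n'  // pgsql -> accounting
              << "(all) -> " << r2.second << '\n';        // (all) -> audit_
  }
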
diff --git a/odb/context.cxx b/odb/context.cxx
index 2350ad1..fbab993 100644
--- a/odb/context.cxx
+++ b/odb/context.cxx
@@ -446,10 +446,12 @@ context (ostream& os_,
include_regex (data_->include_regex_),
accessor_regex (data_->accessor_regex_),
modifier_regex (data_->modifier_regex_),
- embedded_schema (ops.generate_schema () &&
- ops.schema_format ().count (schema_format::embedded)),
- separate_schema (ops.generate_schema () &&
- ops.schema_format ().count (schema_format::separate)),
+ embedded_schema (
+ ops.generate_schema () &&
+ ops.schema_format ()[db].count (schema_format::embedded)),
+ separate_schema (
+ ops.generate_schema () &&
+ ops.schema_format ()[db].count (schema_format::separate)),
top_object (data_->top_object_),
cur_object (data_->cur_object_)
{
@@ -1146,9 +1148,9 @@ schema (semantics::scope& s) const
// If we are still not fully qualified, add the schema that was
// specified on the command line.
//
- if (!r.fully_qualified () && options.schema_specified ())
+ if (!r.fully_qualified () && options.schema ().count (db) != 0)
{
- qname n (options.schema ());
+ qname n (options.schema ()[db]);
n.append (r);
n.swap (r);
}
@@ -1189,8 +1191,8 @@ table_name_prefix (semantics::scope& s) const
// Add the prefix that was specified on the command line.
//
- if (options.table_prefix_specified ())
- r = options.table_prefix () + r;
+ if (options.table_prefix ().count (db) != 0)
+ r = options.table_prefix ()[db] + r;
s.set ("table-prefix", r);
return r;
diff --git a/odb/generator.cxx b/odb/generator.cxx
index d6456d9..ec98a15 100644
--- a/odb/generator.cxx
+++ b/odb/generator.cxx
@@ -25,6 +25,7 @@ using namespace std;
using namespace cutl;
using semantics::path;
+typedef vector<string> strings;
typedef vector<path> paths;
namespace
@@ -52,20 +53,21 @@ namespace
}
void
- append (ostream& os, vector<string> const& text, string const& file)
+ append (ostream& os, strings const& text)
{
- for (vector<string>::const_iterator i (text.begin ());
+ for (strings::const_iterator i (text.begin ());
i != text.end (); ++i)
{
os << *i << endl;
}
+ }
- if (!file.empty ())
- {
- ifstream ifs;
- open (ifs, file);
- os << ifs.rdbuf ();
- }
+ void
+ append (ostream& os, string const& file)
+ {
+ ifstream ifs;
+ open (ifs, file);
+ os << ifs.rdbuf ();
}
}
@@ -196,7 +198,7 @@ generate (options const& ops,
//
//
bool gen_sql_schema (ops.generate_schema () &&
- ops.schema_format ().count (schema_format::sql) &&
+ ops.schema_format ()[db].count (schema_format::sql) &&
db != database::common);
ofstream sql;
@@ -219,7 +221,7 @@ generate (options const& ops,
bool gen_sep_schema (
gen_cxx &&
ops.generate_schema () &&
- ops.schema_format ().count (schema_format::separate) &&
+ ops.schema_format ()[db].count (schema_format::separate) &&
db != database::common);
ofstream sch;
@@ -295,14 +297,22 @@ generate (options const& ops,
// Copy prologue.
//
- if (!ops.hxx_prologue ().empty () || !ops.hxx_prologue_file ().empty ())
{
- hxx << "// Begin prologue." << endl
- << "//" << endl;
- append (hxx, ops.hxx_prologue (), ops.hxx_prologue_file ());
- hxx << "//" << endl
- << "// End prologue." << endl
- << endl;
+ bool p (ops.hxx_prologue ().count (db) != 0);
+ bool pf (ops.hxx_prologue_file ().count (db) != 0);
+
+ if (p || pf)
+ {
+ hxx << "// Begin prologue." << endl
+ << "//" << endl;
+ if (p)
+ append (hxx, ops.hxx_prologue ()[db]);
+ if (pf)
+ append (hxx, ops.hxx_prologue_file ()[db]);
+ hxx << "//" << endl
+ << "// End prologue." << endl
+ << endl;
+ }
}
// Include main file(s).
@@ -359,14 +369,22 @@ generate (options const& ops,
// Copy epilogue.
//
- if (!ops.hxx_epilogue ().empty () || !ops.hxx_epilogue_file ().empty ())
{
- hxx << "// Begin epilogue." << endl
- << "//" << endl;
- append (hxx, ops.hxx_epilogue (), ops.hxx_epilogue_file ());
- hxx << "//" << endl
- << "// End epilogue." << endl
- << endl;
+ bool e (ops.hxx_epilogue ().count (db) != 0);
+ bool ef (ops.hxx_epilogue_file ().count (db) != 0);
+
+ if (e || ef)
+ {
+ hxx << "// Begin epilogue." << endl
+ << "//" << endl;
+ if (e)
+ append (hxx, ops.hxx_epilogue ()[db]);
+ if (ef)
+ append (hxx, ops.hxx_epilogue_file ()[db]);
+ hxx << "//" << endl
+ << "// End epilogue." << endl
+ << endl;
+ }
}
hxx << "#include <odb/post.hxx>" << endl
@@ -391,14 +409,22 @@ generate (options const& ops,
// Copy prologue.
//
- if (!ops.ixx_prologue ().empty () || !ops.ixx_prologue_file ().empty ())
{
- ixx << "// Begin prologue." << endl
- << "//" << endl;
- append (ixx, ops.ixx_prologue (), ops.ixx_prologue_file ());
- ixx << "//" << endl
- << "// End prologue." << endl
- << endl;
+ bool p (ops.ixx_prologue ().count (db) != 0);
+ bool pf (ops.ixx_prologue_file ().count (db) != 0);
+
+ if (p || pf)
+ {
+ ixx << "// Begin prologue." << endl
+ << "//" << endl;
+ if (p)
+ append (ixx, ops.ixx_prologue ()[db]);
+ if (pf)
+ append (ixx, ops.ixx_prologue_file ()[db]);
+ ixx << "//" << endl
+ << "// End prologue." << endl
+ << endl;
+ }
}
{
@@ -430,13 +456,22 @@ generate (options const& ops,
// Copy epilogue.
//
- if (!ops.ixx_epilogue ().empty () || !ops.ixx_epilogue_file ().empty ())
{
- ixx << "// Begin epilogue." << endl
- << "//" << endl;
- append (ixx, ops.ixx_epilogue (), ops.ixx_epilogue_file ());
- ixx << "//" << endl
- << "// End epilogue." << endl;
+ bool e (ops.ixx_epilogue ().count (db) != 0);
+ bool ef (ops.ixx_epilogue_file ().count (db) != 0);
+
+ if (e || ef)
+ {
+ ixx << "// Begin epilogue." << endl
+ << "//" << endl;
+ if (e)
+ append (ixx, ops.ixx_epilogue ()[db]);
+ if (ef)
+ append (ixx, ops.ixx_epilogue_file ()[db]);
+ ixx << "//" << endl
+ << "// End epilogue." << endl
+ << endl;
+ }
}
if (ops.show_sloc ())
@@ -459,14 +494,22 @@ generate (options const& ops,
// Copy prologue.
//
- if (!ops.cxx_prologue ().empty () || !ops.cxx_prologue_file ().empty ())
{
- cxx << "// Begin prologue." << endl
- << "//" << endl;
- append (cxx, ops.cxx_prologue (), ops.cxx_prologue_file ());
- cxx << "//" << endl
- << "// End prologue." << endl
- << endl;
+ bool p (ops.cxx_prologue ().count (db) != 0);
+ bool pf (ops.cxx_prologue_file ().count (db) != 0);
+
+ if (p || pf)
+ {
+ cxx << "// Begin prologue." << endl
+ << "//" << endl;
+ if (p)
+ append (cxx, ops.cxx_prologue ()[db]);
+ if (pf)
+ append (cxx, ops.cxx_prologue_file ()[db]);
+ cxx << "//" << endl
+ << "// End prologue." << endl
+ << endl;
+ }
}
cxx << "#include " << ctx->process_include_path (hxx_name) << endl
@@ -503,14 +546,22 @@ generate (options const& ops,
// Copy epilogue.
//
- if (!ops.cxx_epilogue ().empty () || !ops.cxx_epilogue_file ().empty ())
{
- cxx << "// Begin epilogue." << endl
- << "//" << endl;
- append (cxx, ops.cxx_epilogue (), ops.cxx_epilogue_file ());
- cxx << "//" << endl
- << "// End epilogue." << endl
- << endl;
+ bool e (ops.cxx_epilogue ().count (db) != 0);
+ bool ef (ops.cxx_epilogue_file ().count (db) != 0);
+
+ if (e || ef)
+ {
+ cxx << "// Begin epilogue." << endl
+ << "//" << endl;
+ if (e)
+ append (cxx, ops.cxx_epilogue ()[db]);
+ if (ef)
+ append (cxx, ops.cxx_epilogue_file ()[db]);
+ cxx << "//" << endl
+ << "// End epilogue." << endl
+ << endl;
+ }
}
cxx << "#include <odb/post.hxx>" << endl;
@@ -535,15 +586,22 @@ generate (options const& ops,
// Copy prologue.
//
- if (!ops.schema_prologue ().empty () ||
- !ops.schema_prologue_file ().empty ())
{
- sch << "// Begin prologue." << endl
- << "//" << endl;
- append (sch, ops.schema_prologue (), ops.schema_prologue_file ());
- sch << "//" << endl
- << "// End prologue." << endl
- << endl;
+ bool p (ops.schema_prologue ().count (db) != 0);
+ bool pf (ops.schema_prologue_file ().count (db) != 0);
+
+ if (p || pf)
+ {
+ sch << "// Begin prologue." << endl
+ << "//" << endl;
+ if (p)
+ append (sch, ops.schema_prologue ()[db]);
+ if (pf)
+ append (sch, ops.schema_prologue_file ()[db]);
+ sch << "//" << endl
+ << "// End prologue." << endl
+ << endl;
+ }
}
sch << "#include " << ctx->process_include_path (hxx_name) << endl
@@ -574,15 +632,22 @@ generate (options const& ops,
// Copy epilogue.
//
- if (!ops.schema_epilogue ().empty () ||
- !ops.schema_epilogue_file ().empty ())
{
- sch << "// Begin epilogue." << endl
- << "//" << endl;
- append (sch, ops.schema_epilogue (), ops.schema_epilogue_file ());
- sch << "//" << endl
- << "// End epilogue." << endl
- << endl;
+ bool e (ops.schema_epilogue ().count (db) != 0);
+ bool ef (ops.schema_epilogue_file ().count (db) != 0);
+
+ if (e || ef)
+ {
+ sch << "// Begin epilogue." << endl
+ << "//" << endl;
+ if (e)
+ append (sch, ops.schema_epilogue ()[db]);
+ if (ef)
+ append (sch, ops.schema_epilogue_file ()[db]);
+ sch << "//" << endl
+ << "// End epilogue." << endl
+ << endl;
+ }
}
sch << "#include <odb/post.hxx>" << endl;
@@ -616,15 +681,22 @@ generate (options const& ops,
// Copy prologue.
//
- if (!ops.sql_prologue ().empty () ||
- !ops.sql_prologue_file ().empty ())
{
- sql << "/* Begin prologue." << endl
- << " */" << endl;
- append (sql, ops.sql_prologue (), ops.sql_prologue_file ());
- sql << "/*" << endl
- << " * End prologue. */" << endl
- << endl;
+ bool p (ops.sql_prologue ().count (db) != 0);
+ bool pf (ops.sql_prologue_file ().count (db) != 0);
+
+ if (p || pf)
+ {
+ sql << "/* Begin prologue." << endl
+ << " */" << endl;
+ if (p)
+ append (sql, ops.sql_prologue ()[db]);
+ if (pf)
+ append (sql, ops.sql_prologue_file ()[db]);
+ sql << "/*" << endl
+ << " * End prologue. */" << endl
+ << endl;
+ }
}
if (!ops.omit_drop ())
@@ -632,15 +704,22 @@ generate (options const& ops,
// Copy interlude.
//
- if (!ops.sql_interlude ().empty () ||
- !ops.sql_interlude_file ().empty ())
{
- sql << "/* Begin interlude." << endl
- << " */" << endl;
- append (sql, ops.sql_interlude (), ops.sql_interlude_file ());
- sql << "/*" << endl
- << " * End interlude. */" << endl
- << endl;
+ bool i (ops.sql_interlude ().count (db) != 0);
+ bool ifl (ops.sql_interlude_file ().count (db) != 0);
+
+ if (i || ifl)
+ {
+ sql << "/* Begin interlude." << endl
+ << " */" << endl;
+ if (i)
+ append (sql, ops.sql_interlude ()[db]);
+ if (ifl)
+ append (sql, ops.sql_interlude_file ()[db]);
+ sql << "/*" << endl
+ << " * End interlude. */" << endl
+ << endl;
+ }
}
if (!ops.omit_create ())
@@ -648,15 +727,22 @@ generate (options const& ops,
// Copy epilogue.
//
- if (!ops.sql_epilogue ().empty () ||
- !ops.sql_epilogue_file ().empty ())
{
- sql << "/* Begin epilogue." << endl
- << " */" << endl;
- append (sql, ops.sql_epilogue (), ops.sql_epilogue_file ());
- sql << "/*" << endl
- << " * End epilogue. */" << endl
- << endl;
+ bool e (ops.sql_epilogue ().count (db) != 0);
+ bool ef (ops.sql_epilogue_file ().count (db) != 0);
+
+ if (e || ef)
+ {
+ sql << "/* Begin epilogue." << endl
+ << " */" << endl;
+ if (e)
+ append (sql, ops.sql_epilogue ()[db]);
+ if (ef)
+ append (sql, ops.sql_epilogue_file ()[db]);
+ sql << "/*" << endl
+ << " * End epilogue. */" << endl
+ << endl;
+ }
}
relational::schema::generate_epilogue ();
diff --git a/odb/include.cxx b/odb/include.cxx
index c842010..a921451 100644
--- a/odb/include.cxx
+++ b/odb/include.cxx
@@ -169,6 +169,7 @@ namespace
{
string f (file);
size_t n (f.size ());
+ database db (options_.database ()[0]);
// Check if we have a synthesized prologue/epilogue fragment.
//
@@ -201,18 +202,18 @@ namespace
size_t size (options_.odb_prologue ().size ());
if (n < size)
- ss << options_.odb_prologue ()[n];
+ ss << options_.odb_prologue ()[db][n];
else
- f = options_.odb_prologue_file ()[n - size];
+ f = options_.odb_prologue_file ()[db][n - size];
}
else
{
size_t size (options_.odb_epilogue ().size ());
if (n < size)
- ss << options_.odb_epilogue ()[n];
+ ss << options_.odb_epilogue ()[db][n];
else
- f = options_.odb_epilogue_file ()[n - size];
+ f = options_.odb_epilogue_file ()[db][n - size];
}
if (f.empty ())
diff --git a/odb/odb.cxx b/odb/odb.cxx
index b80f577..260f9b8 100644
--- a/odb/odb.cxx
+++ b/odb/odb.cxx
@@ -817,38 +817,45 @@ main (int argc, char* argv[])
// NOTE: if you change the format, you also need to update code
// in include.cxx
//
- strings const& pro (ops.odb_prologue ());
- for (size_t i (0); i < pro.size (); ++i)
+ size_t pro_count (1);
+ if (ops.odb_prologue ().count (db) != 0)
{
- os << "#line 1 \"<odb-prologue-" << i + 1 << ">\"" << endl
- << pro[i] << endl;
+ strings const& pro (ops.odb_prologue ()[db]);
+ for (size_t i (0); i < pro.size (); ++i, ++pro_count)
+ {
+ os << "#line 1 \"<odb-prologue-" << pro_count << ">\"" << endl
+ << pro[i] << endl;
+ }
}
- strings const& prof (ops.odb_prologue_file ());
- for (size_t i (0); i < prof.size (); ++i)
+ if (ops.odb_prologue_file ().count (db) != 0)
{
- os << "#line 1 \"<odb-prologue-" << pro.size () + i + 1 << ">\""
- << endl;
+ strings const& prof (ops.odb_prologue_file ()[db]);
+ for (size_t i (0); i < prof.size (); ++i, ++pro_count)
+ {
+ os << "#line 1 \"<odb-prologue-" << pro_count << ">\""
+ << endl;
- ifstream ifs (prof[i].c_str (), ios_base::in | ios_base::binary);
+ ifstream ifs (prof[i].c_str (), ios_base::in | ios_base::binary);
- if (!ifs.is_open ())
- {
- e << prof[i] << ": error: unable to open in read mode" << endl;
- fb.close ();
- wait_process (pi, argv[0]);
- return 1;
- }
+ if (!ifs.is_open ())
+ {
+ e << prof[i] << ": error: unable to open in read mode" << endl;
+ fb.close ();
+ wait_process (pi, argv[0]);
+ return 1;
+ }
- if (!(os << ifs.rdbuf ()))
- {
- e << prof[i] << ": error: io failure" << endl;
- fb.close ();
- wait_process (pi, argv[0]);
- return 1;
- }
+ if (!(os << ifs.rdbuf ()))
+ {
+ e << prof[i] << ": error: io failure" << endl;
+ fb.close ();
+ wait_process (pi, argv[0]);
+ return 1;
+ }
- os << endl;
+ os << endl;
+ }
}
if (at_once)
@@ -887,38 +894,45 @@ main (int argc, char* argv[])
// NOTE: if you change the format, you also need to update code
// in include.cxx
//
- strings const& epi (ops.odb_epilogue ());
- for (size_t i (0); i < epi.size (); ++i)
+ size_t epi_count (1);
+ if (ops.odb_epilogue ().count (db) != 0)
{
- os << "#line 1 \"<odb-epilogue-" << i + 1 << ">\"" << endl
- << epi[i] << endl;
+ strings const& epi (ops.odb_epilogue ()[db]);
+ for (size_t i (0); i < epi.size (); ++i, ++epi_count)
+ {
+ os << "#line 1 \"<odb-epilogue-" << epi_count << ">\"" << endl
+ << epi[i] << endl;
+ }
}
- strings const& epif (ops.odb_epilogue_file ());
- for (size_t i (0); i < epif.size (); ++i)
+ if (ops.odb_epilogue_file ().count (db) != 0)
{
- os << "#line 1 \"<odb-epilogue-" << epi.size () + i + 1 << ">\""
- << endl;
+ strings const& epif (ops.odb_epilogue_file ()[db]);
+ for (size_t i (0); i < epif.size (); ++i, ++epi_count)
+ {
+ os << "#line 1 \"<odb-epilogue-" << epi_count << ">\""
+ << endl;
- ifstream ifs (epif[i].c_str (), ios_base::in | ios_base::binary);
+ ifstream ifs (epif[i].c_str (), ios_base::in | ios_base::binary);
- if (!ifs.is_open ())
- {
- e << epif[i] << ": error: unable to open in read mode" << endl;
- fb.close ();
- wait_process (pi, argv[0]);
- return 1;
- }
+ if (!ifs.is_open ())
+ {
+ e << epif[i] << ": error: unable to open in read mode" << endl;
+ fb.close ();
+ wait_process (pi, argv[0]);
+ return 1;
+ }
- if (!(os << ifs.rdbuf ()))
- {
- e << epif[i] << ": error: io failure" << endl;
- fb.close ();
- wait_process (pi, argv[0]);
- return 1;
- }
+ if (!(os << ifs.rdbuf ()))
+ {
+ e << epif[i] << ": error: io failure" << endl;
+ fb.close ();
+ wait_process (pi, argv[0]);
+ return 1;
+ }
- os << endl;
+ os << endl;
+ }
}
if (!ops.trace ())
diff --git a/odb/option-functions.cxx b/odb/option-functions.cxx
index aca4943..b0b2523 100644
--- a/odb/option-functions.cxx
+++ b/odb/option-functions.cxx
@@ -22,9 +22,9 @@ process_options (options& o)
// Set the default schema format depending on the database.
//
- if (o.generate_schema () && o.schema_format ().empty ())
+ if (o.generate_schema () && o.schema_format ()[db].empty ())
{
- set<schema_format> f;
+ set<schema_format>& f (o.schema_format ()[db]);
switch (db)
{
@@ -46,10 +46,13 @@ process_options (options& o)
break;
}
}
-
- o.schema_format (f);
}
+ // Set default --schema-name value.
+ //
+ if (o.schema_name ().count (db) == 0)
+ o.schema_name ()[db] = "";
+
// Set default --*--file-suffix values.
//
{
diff --git a/odb/option-parsers.hxx b/odb/option-parsers.hxx
index a8fc1f5..a974a7c 100644
--- a/odb/option-parsers.hxx
+++ b/odb/option-parsers.hxx
@@ -5,6 +5,7 @@
#ifndef ODB_OPTION_PARSERS_HXX
#define ODB_OPTION_PARSERS_HXX
+#include <vector>
#include <sstream>
#include <odb/option-types.hxx>
@@ -12,72 +13,183 @@
namespace cli
{
+ // Return true if there is a database prefix.
+ //
+ template <typename V>
+ bool
+ parse_option_value (std::string const& o, std::string const& ov,
+ database& k, V& v)
+ {
+ bool r (false);
+ std::string::size_type p = ov.find (':');
+
+ std::string vstr;
+ if (p != std::string::npos)
+ {
+ std::string kstr (ov, 0, p);
+
+ // See if this prefix resolves to the database name. If not,
+ // assume there is no prefix.
+ //
+ std::istringstream ks (kstr);
+
+ if (ks >> k && ks.eof ())
+ {
+ r = true;
+ vstr.assign (ov, p + 1, std::string::npos);
+ }
+ }
+
+ if (!r)
+ vstr = ov; // Use the whole value.
+
+ if (!vstr.empty ())
+ {
+ std::istringstream vs (vstr);
+
+ if (!(vs >> v && vs.eof ()))
+ throw invalid_value (o, ov);
+ }
+ else
+ v = V ();
+
+ return r;
+ }
+
+ // Specialization for std::string.
+ //
+ bool
+ parse_option_value (std::string const&, std::string const& ov,
+ database& k, std::string& v)
+ {
+ bool r (false);
+ std::string::size_type p = ov.find (':');
+
+ if (p != std::string::npos)
+ {
+ std::string kstr (ov, 0, p);
+
+ // See if this prefix resolves to the database name. If not,
+ // assume there is no prefix.
+ //
+ std::istringstream ks (kstr);
+
+ if (ks >> k && ks.eof ())
+ {
+ r = true;
+ v.assign (ov, p + 1, std::string::npos);
+ }
+ }
+
+ if (!r)
+ v = ov; // Use the whole value.
+
+ return r;
+ }
+
template <typename V>
struct parser<database_map<V> >
{
+ typedef database_map<V> map;
+
static void
- parse (database_map<V>& m, bool& xs, scanner& s)
+ parse (map& m, bool& xs, scanner& s)
{
- typedef database_map<V> map;
-
xs = true;
std::string o (s.next ());
if (s.more ())
{
- std::string ov (s.next ());
- std::string::size_type p = ov.find (':');
+ database k;
+ V v;
- if (p != std::string::npos)
+ if (parse_option_value (o, s.next (), k, v))
+ m[k] = v; // Override any old value.
+ else
{
- std::string kstr (ov, 0, p);
- std::string vstr (ov, p + 1);
-
- // See if this prefix resolves to the database name. If not,
- // assume there is no prefix.
+ // No database prefix is specified which means it applies to
+ // all the databases. We also don't want to override database-
+ // specific values, so use insert().
//
- database k;
- std::istringstream ks (kstr);
+ m.insert (typename map::value_type (database::common, v));
+ m.insert (typename map::value_type (database::mssql, v));
+ m.insert (typename map::value_type (database::mysql, v));
+ m.insert (typename map::value_type (database::oracle, v));
+ m.insert (typename map::value_type (database::pgsql, v));
+ m.insert (typename map::value_type (database::sqlite, v));
+ }
+ }
+ else
+ throw missing_value (o);
+ }
+ };
- if (ks >> k && ks.eof ())
- {
- V v = V ();
+ template <typename V>
+ struct parser<database_map<std::vector<V> > >
+ {
+ typedef database_map<std::vector<V> > map;
- if (!vstr.empty ())
- {
- std::istringstream vs (vstr);
+ static void
+ parse (map& m, bool& xs, scanner& s)
+ {
+ xs = true;
+ std::string o (s.next ());
- if (!(vs >> v && vs.eof ()))
- throw invalid_value (o, ov);
- }
+ if (s.more ())
+ {
+ database k;
+ V v;
- m[k] = v; // Override any old value.
- return;
- }
+ if (parse_option_value (o, s.next (), k, v))
+ m[k].push_back (v);
+ else
+ {
+ // No database prefix is specified which means it applies to
+ // all the databases.
+ //
+ m[database::common].push_back (v);
+ m[database::mssql].push_back (v);
+ m[database::mysql].push_back (v);
+ m[database::oracle].push_back (v);
+ m[database::pgsql].push_back (v);
+ m[database::sqlite].push_back (v);
}
+ }
+ else
+ throw missing_value (o);
+ }
+ };
- // No database prefix is specified which means it applies to
- // all the databases.
- //
- V v = V ();
+ template <typename V>
+ struct parser<database_map<std::set<V> > >
+ {
+ typedef database_map<std::set<V> > map;
- if (!ov.empty ())
- {
- std::istringstream vs (ov);
+ static void
+ parse (map& m, bool& xs, scanner& s)
+ {
+ xs = true;
+ std::string o (s.next ());
- if (!(vs >> v && vs.eof ()))
- throw invalid_value (o, ov);
- }
+ if (s.more ())
+ {
+ database k;
+ V v;
- // We don't want to override database-specific values, so use
- // insert().
- //
- m.insert (typename map::value_type (database::common, v));
- m.insert (typename map::value_type (database::mssql, v));
- m.insert (typename map::value_type (database::mysql, v));
- m.insert (typename map::value_type (database::oracle, v));
- m.insert (typename map::value_type (database::pgsql, v));
- m.insert (typename map::value_type (database::sqlite, v));
+ if (parse_option_value (o, s.next (), k, v))
+ m[k].insert (v);
+ else
+ {
+ // No database prefix is specified which means it applies to
+ // all the databases.
+ //
+ m[database::common].insert (v);
+ m[database::mssql].insert (v);
+ m[database::mysql].insert (v);
+ m[database::oracle].insert (v);
+ m[database::pgsql].insert (v);
+ m[database::sqlite].insert (v);
+ }
}
else
throw missing_value (o);
@@ -85,5 +197,4 @@ namespace cli
};
}
-
#endif // ODB_OPTION_PARSERS_HXX
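
The parser specializations above share one convention: a database-prefixed
value targets a single database, while an unprefixed value fans out to all of
them, and for scalar (non-container) options the fan-out uses insert() so it
never clobbers a value that was set with an explicit prefix. A minimal sketch
of that behaviour, using std::map<std::string, std::string> in place of
database_map and hypothetical option values:

  #include <cstddef>
  #include <iostream>
  #include <map>
  #include <string>
  #include <utility>

  typedef std::map<std::string, std::string> scalar_map;

  // db == "" means the value had no database prefix.
  void
  set_scalar (scalar_map& m, const std::string& db, const std::string& v)
  {
    static const char* all[] = {"common", "mssql", "mysql",
                                "oracle", "pgsql", "sqlite"};
    if (!db.empty ())
      m[db] = v;                               // Override any old value.
    else
      for (std::size_t i (0); i < sizeof (all) / sizeof (all[0]); ++i)
        m.insert (std::make_pair (all[i], v)); // Keep database-specific values.
  }

  int main ()
  {
    scalar_map m;
    set_scalar (m, "pgsql", "accounting");     // --schema pgsql:accounting
    set_scalar (m, "", "app");                 // --schema app

    std::cout << m["pgsql"] << ' ' << m["mysql"] << '\n'; // accounting app
  }
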
diff --git a/odb/options.cli b/odb/options.cli
index 61d8371..3c7efa8 100644
--- a/odb/options.cli
+++ b/odb/options.cli
@@ -45,7 +45,7 @@ class options
//
// Plugin options.
//
- std::vector< ::database > --database | -d
+ std::vector< ::database> --database | -d
{
"<db>",
"Generate code for the <db> database. Valid values are \cb{mssql},
@@ -113,7 +113,7 @@ class options
for details)."
};
- std::set< ::schema_format> --schema-format
+ database_map<std::set< ::schema_format> > --schema-format
{
"<format>",
"Generate the database schema in the specified format. Pass \cb{sql} as
@@ -137,7 +137,7 @@ class options
"Omit \cb{CREATE} statements from the generated database schema."
};
- std::string --schema-name = ""
+ database_map<std::string> --schema-name
{
"<name>",
"Use <name> as the database schema name. Schema names are primarily
@@ -210,7 +210,7 @@ class options
\cb{#include} directive resolution."
};
- qname --schema
+ database_map<qname> --schema
{
"<schema>",
"Specify a database schema (database namespace) that should be
@@ -220,7 +220,7 @@ class options
\cb{--schema-name} option."
};
- std::string --table-prefix
+ database_map<std::string> --table-prefix
{
"<prefix>",
"Add <prefix> to table and index names. The prefix is added to both
@@ -306,31 +306,31 @@ class options
// Prologues.
//
- std::vector<std::string> --hxx-prologue
+ database_map<std::vector<std::string> > --hxx-prologue
{
"<text>",
"Insert <text> at the beginning of the generated C++ header file."
};
- std::vector<std::string> --ixx-prologue
+ database_map<std::vector<std::string> > --ixx-prologue
{
"<text>",
"Insert <text> at the beginning of the generated C++ inline file."
};
- std::vector<std::string> --cxx-prologue
+ database_map<std::vector<std::string> > --cxx-prologue
{
"<text>",
"Insert <text> at the beginning of the generated C++ source file."
};
- std::vector<std::string> --schema-prologue
+ database_map<std::vector<std::string> > --schema-prologue
{
"<text>",
"Insert <text> at the beginning of the generated schema C++ source file."
};
- std::vector<std::string> --sql-prologue
+ database_map<std::vector<std::string> > --sql-prologue
{
"<text>",
"Insert <text> at the beginning of the generated database schema file."
@@ -338,7 +338,7 @@ class options
// Interludes.
//
- std::vector<std::string> --sql-interlude
+ database_map<std::vector<std::string> > --sql-interlude
{
"<text>",
"Insert <text> after all the \cb{DROP} and before any \cb{CREATE}
@@ -347,31 +347,31 @@ class options
// Epilogues.
//
- std::vector<std::string> --hxx-epilogue
+ database_map<std::vector<std::string> > --hxx-epilogue
{
"<text>",
"Insert <text> at the end of the generated C++ header file."
};
- std::vector<std::string> --ixx-epilogue
+ database_map<std::vector<std::string> > --ixx-epilogue
{
"<text>",
"Insert <text> at the end of the generated C++ inline file."
};
- std::vector<std::string> --cxx-epilogue
+ database_map<std::vector<std::string> > --cxx-epilogue
{
"<text>",
"Insert <text> at the end of the generated C++ source file."
};
- std::vector<std::string> --schema-epilogue
+ database_map<std::vector<std::string> > --schema-epilogue
{
"<text>",
"Insert <text> at the end of the generated schema C++ source file."
};
- std::vector<std::string> --sql-epilogue
+ database_map<std::vector<std::string> > --sql-epilogue
{
"<text>",
"Insert <text> at the end of the generated database schema file."
@@ -379,35 +379,35 @@ class options
// Prologue files.
//
- std::string --hxx-prologue-file
+ database_map<std::string> --hxx-prologue-file
{
"<file>",
"Insert the content of <file> at the beginning of the generated C++
header file."
};
- std::string --ixx-prologue-file
+ database_map<std::string> --ixx-prologue-file
{
"<file>",
"Insert the content of <file> at the beginning of the generated C++
inline file."
};
- std::string --cxx-prologue-file
+ database_map<std::string> --cxx-prologue-file
{
"<file>",
"Insert the content of <file> at the beginning of the generated C++
source file."
};
- std::string --schema-prologue-file
+ database_map<std::string> --schema-prologue-file
{
"<file>",
"Insert the content of <file> at the beginning of the generated schema
C++ source file."
};
- std::string --sql-prologue-file
+ database_map<std::string> --sql-prologue-file
{
"<file>",
"Insert the content of <file> at the beginning of the generated
@@ -416,7 +416,7 @@ class options
// Interlude files.
//
- std::string --sql-interlude-file
+ database_map<std::string> --sql-interlude-file
{
"<file>",
"Insert the content of <file> after all the \cb{DROP} and before any
@@ -425,35 +425,35 @@ class options
// Epilogue files.
//
- std::string --hxx-epilogue-file
+ database_map<std::string> --hxx-epilogue-file
{
"<file>",
"Insert the content of <file> at the end of the generated C++ header
file."
};
- std::string --ixx-epilogue-file
+ database_map<std::string> --ixx-epilogue-file
{
"<file>",
"Insert the content of <file> at the end of the generated C++ inline
file."
};
- std::string --cxx-epilogue-file
+ database_map<std::string> --cxx-epilogue-file
{
"<file>",
"Insert the content of <file> at the end of the generated C++ source
file."
};
- std::string --schema-epilogue-file
+ database_map<std::string> --schema-epilogue-file
{
"<file>",
"Insert the content of <file> at the end of the generated schema C++
source file."
};
- std::string --sql-epilogue-file
+ database_map<std::string> --sql-epilogue-file
{
"<file>",
"Insert the content of <file> at the end of the generated database
@@ -462,7 +462,7 @@ class options
// ODB compilation prologue/epilogue.
//
- std::vector<std::string> --odb-prologue
+ database_map<std::vector<std::string> > --odb-prologue
{
"<text>",
"Compile <text> before the input header file. This option allows you
@@ -470,7 +470,7 @@ class options
to the ODB compilation process."
};
- std::vector<std::string> --odb-prologue-file
+ database_map<std::vector<std::string> > --odb-prologue-file
{
"<file>",
"Compile <file> contents before the input header file. Prologue files
@@ -478,7 +478,7 @@ class options
option)."
};
- std::vector<std::string> --odb-epilogue
+ database_map<std::vector<std::string> > --odb-epilogue
{
"<text>",
"Compile <text> after the input header file. This option allows you
@@ -486,7 +486,7 @@ class options
to the ODB compilation process."
};
- std::vector<std::string> --odb-epilogue-file
+ database_map<std::vector<std::string> > --odb-epilogue-file
{
"<file>",
"Compile <file> contents after the input header file. Epilogue files
diff --git a/odb/relational/schema.hxx b/odb/relational/schema.hxx
index a236fd5..63ab860 100644
--- a/odb/relational/schema.hxx
+++ b/odb/relational/schema.hxx
@@ -943,7 +943,7 @@ namespace relational
os << "static const schema_catalog_entry" << endl
<< "schema_catalog_entry_" << flat_name (type) << "_ (" << endl
<< "id_" << db << "," << endl
- << strlit (options.schema_name ()) << "," << endl
+ << strlit (options.schema_name ()[db]) << "," << endl
<< "&" << traits << "::create_schema);"
<< endl;
}
diff --git a/odb/validator.cxx b/odb/validator.cxx
index d0b9ee2..5e99114 100644
--- a/odb/validator.cxx
+++ b/odb/validator.cxx
@@ -968,7 +968,7 @@ validate (options const& ops,
//
if (ops.generate_schema_only () &&
(ops.schema_format ().size () != 1 ||
- *ops.schema_format ().begin () != schema_format::sql))
+ *ops.schema_format ()[db].begin () != schema_format::sql))
{
cerr << "error: --generate-schema-only is only valid when generating " <<
"schema as a standalone SQL file" << endl;