[libgda] Improved documentation related to blobs



commit f189bef71385fa9de4103afe60a2f7bf81ad0d37
Author: Vivien Malerba <malerba gnome-db org>
Date:   Fri Sep 17 19:15:14 2010 +0200

    Improved documentation related to blobs

 doc/C/Makefile.am           |    3 +-
 doc/C/examples/blobtest.c   |  192 +++++++++++++++++++++++++++++++++++++++++++
 doc/C/gettingstarted.xml    |   16 ++++
 doc/C/libgda-4.0-docs.sgml  |   20 +++--
 doc/C/limitations.xml       |   14 +++-
 doc/C/prov-notes.xml        |   39 +++++++++-
 doc/C/tmpl/gda-blob-op.sgml |    2 +-
 libgda/gda-value.c          |    8 +-
 8 files changed, 279 insertions(+), 15 deletions(-)
---
diff --git a/doc/C/Makefile.am b/doc/C/Makefile.am
index cc7ab31..7d43fab 100644
--- a/doc/C/Makefile.am
+++ b/doc/C/Makefile.am
@@ -83,7 +83,8 @@ EXTRA_DIST += examples/full_example.c installation.xml limitations.xml migration
 	stmt-update.svg stmt-compound.svg information_schema.svg howto-exec.svg thread-wrapper.svg \
 	version.xml.in \
 	visual_index.xml prov-notes.xml \
-	libgda-sections.txt libgda-ui-sections.txt libgda.types libgda.types.in libgda-ui.types
+	libgda-sections.txt libgda-ui-sections.txt libgda.types libgda.types.in libgda-ui.types \
+	examples/blobtest.c
 
 # Files not to distribute
 # for --rebuild-types in $(SCAN_OPTIONS), e.g. $(DOC_MODULE).types
diff --git a/doc/C/examples/blobtest.c b/doc/C/examples/blobtest.c
new file mode 100644
index 0000000..b92a74a
--- /dev/null
+++ b/doc/C/examples/blobtest.c
@@ -0,0 +1,192 @@
+<![CDATA[#include <libgda/libgda.h>
+#include <libgda/sql-parser/gda-sql-parser.h>
+#include <libgda/gda-blob-op.h>
+
+GdaConnection *open_connection (void);
+static gboolean do_store (GdaConnection *cnc, const gchar *filename, GError **error);
+static gboolean do_fetch (GdaConnection *cnc, gint id, GError **error);
+
+int
+main (int argc, char *argv[])
+{
+        GdaConnection *cnc;
+	const gchar *filename = NULL;
+	gint id = 0;
+	gboolean store;
+	GError *error = NULL;
+	gboolean result;
+
+	/* parse arguments */
+	if (argc != 3)
+		goto help;
+	if (! g_ascii_strcasecmp (argv[1], "store")) {
+		filename = argv[2];
+		store = TRUE;
+	}
+	else if (! g_ascii_strcasecmp (argv[1], "fetch")) {
+		id = atoi (argv[2]);
+		store = FALSE;
+	}
+	else
+		goto help;
+
+	/* do the job */
+        gda_init ();
+	cnc = open_connection ();
+	if (store)
+		result = do_store (cnc, filename, &error);
+	else
+		result = do_fetch (cnc, id, &error);
+        gda_connection_close (cnc);
+
+	if (!result) {
+		g_print ("ERROR: %s\n", error && error->message ? error->message : "No detail");
+		g_clear_error (&error);
+	}
+	else
+		g_print ("Ok.\n");
+
+        return result ? 0 : 1;
+
+ help:
+	g_print ("%s [store <filename> | fetch <ID>]\n", argv[0]);
+	return 0;
+}
+
+/*
+ * Open a connection to the example.db file
+ */
+GdaConnection *
+open_connection (void)
+{
+        GdaConnection *cnc;
+        GError *error = NULL;
+        cnc = gda_connection_open_from_string ("SQLite", "DB_DIR=.;DB_NAME=testblob", NULL,
+					       GDA_CONNECTION_OPTIONS_NONE,
+					       &error);
+        if (!cnc) {
+                g_print ("Could not open connection to SQLite database in testblob.db file: %s\n",
+                         error && error->message ? error->message : "No detail");
+                exit (1);
+        }
+        return cnc;
+}
+
+static gboolean
+do_store (GdaConnection *cnc, const gchar *filename, GError **error)
+{
+	if (! g_file_test (filename, G_FILE_TEST_EXISTS) ||
+	    g_file_test (filename, G_FILE_TEST_IS_DIR)) {
+		g_set_error (error, 0, 0,
+			     "File does not exist or is a directory");
+		return FALSE;
+	}
+
+	GdaSqlParser *parser;
+	GdaStatement *stmt;
+	GdaSet *params, *newrow;
+	GdaHolder *holder;
+	GValue *value;
+	GdaBlob *blob;
+	gint res;
+
+	parser = gda_sql_parser_new ();
+	stmt = gda_sql_parser_parse_string (parser,
+					    "INSERT INTO blobstable (data) VALUES (##blob::blob)",
+					    NULL, error);
+	g_object_unref (parser);
+	if (!stmt)
+		return FALSE;
+
+	if (! gda_statement_get_parameters (stmt, &params, error)) {
+		g_object_unref (stmt);
+		return FALSE;
+	}
+
+	holder = gda_set_get_holder (params, "blob");
+	value = gda_value_new_blob_from_file (filename);
+	blob = (GdaBlob*) gda_value_get_blob (value);
+	g_assert (gda_holder_take_value (holder, value, NULL));
+
+	g_print ("STORING file '%s' to database BLOB\n", filename);
+	res = gda_connection_statement_execute_non_select (cnc, stmt, params, &newrow, error);
+	g_object_unref (params);
+	g_object_unref (stmt);
+
+	if (newrow) {
+		GSList *list;
+		g_print ("Inserted row is (for each numbered column in the table):\n");
+		for (list = newrow->holders; list; list = list->next) {
+			const GValue *value;
+			gchar *tmp;
+			value = gda_holder_get_value (GDA_HOLDER (list->data));
+			tmp = gda_value_stringify (value);
+			g_print ("  [%s] = [%s]\n", gda_holder_get_id (GDA_HOLDER (list->data)), tmp);
+			g_free (tmp);
+		}
+		g_object_unref (newrow);
+	}
+	else
+		g_print ("Provider did not return the inserted row\n");
+
+	return (res == -1) ? FALSE : TRUE;
+}
+
+static gboolean
+do_fetch (GdaConnection *cnc, gint id, GError **error)
+{
+	GdaSqlParser *parser;
+	GdaStatement *stmt;
+	GdaSet *params;
+	GdaDataModel *model;
+	const GValue *value;
+	GdaBlob *blob;
+	gboolean result = TRUE;
+
+	gchar *filename;
+	filename = g_strdup_printf ("fetched_%d", id);
+	g_print ("FETCHING BLOB with ID %d to file '%s'\n", id, filename);
+
+	parser = gda_sql_parser_new ();
+	stmt = gda_sql_parser_parse_string (parser,
+					    "SELECT data FROM blobstable WHERE id=##id::int",
+					    NULL, error);
+	g_object_unref (parser);
+	if (!stmt)
+		return FALSE;
+
+	if (! gda_statement_get_parameters (stmt, &params, error)) {
+		g_object_unref (stmt);
+		return FALSE;
+	}
+	g_assert (gda_set_set_holder_value (params, NULL, "id", id));
+	model = gda_connection_statement_execute_select (cnc, stmt, params, error);
+	g_object_unref (params);
+	g_object_unref (stmt);
+	if (! model)
+		return FALSE;
+
+	value = gda_data_model_get_value_at (model, 0, 0, error);
+	if (!value) {
+		g_object_unref (model);
+		return FALSE;
+	}
+	g_assert (G_VALUE_TYPE (value) == GDA_TYPE_BLOB);
+	
+	blob = (GdaBlob*) gda_value_get_blob (value);
+	if (blob->op) {
+		GValue *dest_value;
+		GdaBlob *dest_blob;
+		
+		dest_value = gda_value_new_blob_from_file (filename);
+		dest_blob = (GdaBlob*) gda_value_get_blob (dest_value);
+		result = gda_blob_op_write_all (dest_blob->op, (GdaBlob*) blob);
+		gda_value_free (dest_value);
+	}
+	else
+		result = g_file_set_contents (filename, (gchar *) ((GdaBinary*)blob)->data,
+					     ((GdaBinary*)blob)->binary_length, error);
+	g_free (filename);
+	return result;
+}
+]]>
diff --git a/doc/C/gettingstarted.xml b/doc/C/gettingstarted.xml
index d759a91..8ad2b87 100644
--- a/doc/C/gettingstarted.xml
+++ b/doc/C/gettingstarted.xml
@@ -451,6 +451,22 @@ create_table (GdaConnection *cnc)
     </para>
   </sect1>
 
+  <sect1 id="blobs_example">
+    <title>Binary large objects (BLOBs) example</title>
+    <para>
+       The following example is available in the <filename>samples/Blobs</filename> source directory
+       and illustrates how to manipulate BLOBs. This test program offers two operations: storing the
+       contents of a file into the database (which returns the ID of the stored data), and fetching
+       stored data back from the database by its ID (which creates a fetched_&lt;ID&gt; file).
+    </para>
+    <para>
+      The code is:
+      <programlisting>
+&blobsexample;
+      </programlisting>
+    </para>
+  </sect1>
+
   <sect1 id="other_examples">
     <title>Other examples</title>
     <para>
diff --git a/doc/C/libgda-4.0-docs.sgml b/doc/C/libgda-4.0-docs.sgml
index 49d6e5f..500e638 100644
--- a/doc/C/libgda-4.0-docs.sgml
+++ b/doc/C/libgda-4.0-docs.sgml
@@ -35,6 +35,7 @@
 <!ENTITY gda-sql "<command>gda-sql</command>">
 <!ENTITY gda-test-connection "<command>gda-test-connection-4.0</command>">
 <!ENTITY fullexample SYSTEM "examples/full_example.c">
+<!ENTITY blobsexample SYSTEM "examples/blobtest.c">
 <!ENTITY install SYSTEM "installation.xml">
 <!ENTITY examples SYSTEM "gettingstarted.xml">
 <!ENTITY migration SYSTEM "migration.xml">
@@ -514,15 +515,16 @@
 	</para>
       </sect1>
 
-      <sect1 id="gen:blobs">
+      <sect1 id="gen_blobs">
 	<title>Binary large objects (BLOBs)</title>
 	<para>
-	  Binary large objects (BLOBs) are potentially very big (serevral GigaBytes) pieces of binary
+	  Binary large objects (BLOBs) are potentially very big (several GigaBytes) pieces of binary
 	  data which databases can store. Because of their potential size, they are not manipulated
-	  like any other data, and are usually not transfered when running a statement: first
-	  a reference to a blob is created for an existing or a new blob using a statement, and then
-	  that reference is used, <emphasis>after the statement has been executed</emphasis>, to
-	  get or set some parts of the blob.
+	  like any other data, and are usually not transferred to the database engine at the same time
+	  as the SQL text is transferred: first
+	  a reference to a blob is created for an existing or a new blob, and then
+	  that reference is used, <emphasis>before or after the statement is executed,
+	    depending on the database engine accessed</emphasis>, to get or set some parts of the blob.
 	</para>
 	<para>
 	  &LIBGDA; makes its best to hide the complexity of this and most of the time blobs can be used
@@ -539,6 +541,12 @@
 		contents.</para></listitem>
 	  </itemizedlist>
 	</para>
+	<para>
+	  Please consult each database provider's <link linkend="provider-notes">notes</link> and
+	  <link linkend="limitations">limitations</link> for database specific rules
+	  regarding BLOBs. Also have a look at the
+	  <link linkend="blobs_example">Blobs manipulation source code example</link>.
+	</para>
       </sect1>
 
     </chapter>
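
As a concrete illustration of the two-step process described above, here is a minimal C sketch of
the "get" side (the helper name print_blob_size and the assumption that the blob sits in the first
column of the first row are illustrative; the complete round trip is in the blobtest.c example above):

#include <libgda/libgda.h>
#include <libgda/gda-blob-op.h>

/* Illustrative helper: print the size of the blob held in the first row and
 * first column of @model (assumed to be the result of a SELECT over a blob
 * column). The SELECT only returned a reference to the blob; the data itself
 * is only transferred when the GdaBlobOp is used. */
static gboolean
print_blob_size (GdaDataModel *model, GError **error)
{
	const GValue *value;
	GdaBlob *blob;

	value = gda_data_model_get_value_at (model, 0, 0, error);
	if (!value)
		return FALSE;
	g_assert (G_VALUE_TYPE (value) == GDA_TYPE_BLOB);

	blob = (GdaBlob*) gda_value_get_blob (value);
	if (blob->op)
		/* the data has not been transferred yet: query its length through the reference */
		g_print ("Blob is %ld bytes long\n", gda_blob_op_get_length (blob->op));
	else
		/* some providers return the binary data directly */
		g_print ("Blob is %ld bytes long\n", ((GdaBinary*) blob)->binary_length);
	return TRUE;
}
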
diff --git a/doc/C/limitations.xml b/doc/C/limitations.xml
index 9e6fd6c..e510951 100644
--- a/doc/C/limitations.xml
+++ b/doc/C/limitations.xml
@@ -73,7 +73,7 @@
 	      blob. If this is not the case, then you must cast it to the correct data type within your SQL query (for example
 	      as "SELECT oid::varchar FROM...")</para></listitem>
 	  <listitem><para>&LIBGDA; does not try to avoid orphaned BLOBs, to solve the problem of orphaned BLOBs, set up
-	      the "lo" extension from PöstgreSQL's contrib, and/or use the "vacuumlo" also from the contrib.</para>
+	      the "lo" extension from PostgreSQL's contrib, and/or use the "vacuumlo" also from the contrib.</para>
 	  <para>Note that in the future the PostgreSQL's provider will have an option to ensure that there are no
 	    orphaned BLOBs itself.</para></listitem>
 	  <listitem><para>BLOB manipulations can only occur within a transaction, 
@@ -85,7 +85,9 @@
 
     <sect2><title>Last inserted row's values</title>
       <para>
-	The <link linkend="gda-connection-statement-execute-non-select">gda_connection_statement_execute_non_select()</link>'s last_insert_row attribute will return a <link linkend="GdaSet">GdaSet</link> object only if the table has OIDs.
+	The <link linkend="gda-connection-statement-execute-non-select">gda_connection_statement_execute_non_select()</link>'s
+	last_insert_row parameter will return a new <link linkend="GdaSet">GdaSet</link> object only if the table has OIDs
+	(to ensure this, the "WITH OIDS" option should be added at the end of a CREATE TABLE query).
       </para>
     </sect2>
 
@@ -133,6 +135,14 @@
 	flag, then &LIBGDA; sets the SQLite library in a state where multi threading is fully supported.
       </para>
     </sect2>
+
+    <sect2><title>Error reporting</title>
+      <para>
+	If there is not enough free space on the disk which stores an SQLite database, you may get
+	"Disk I/O error" errors. This is also true for meta data connections, which are by default
+	created in a temporary location.
+      </para>
+    </sect2>
   </sect1>
 
   <sect1 id="limitations_sqlcipher"><title>For SqlCipher</title>
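
For the PostgreSQL last_insert_row note above, a minimal sketch (the table "mytable" and its columns
are illustrative, and cnc is assumed to be an already opened GdaConnection; the WITH OIDS clause is
the PostgreSQL-specific requirement):

/* the table must have been created with OIDs, e.g.
 *   CREATE TABLE mytable (id serial PRIMARY KEY, name text) WITH OIDS;
 */
GdaSet *last_row = NULL;
GdaSqlParser *parser = gda_sql_parser_new ();
GdaStatement *stmt = gda_sql_parser_parse_string (parser,
			"INSERT INTO mytable (name) VALUES ('test')", NULL, NULL);
g_object_unref (parser);
if ((gda_connection_statement_execute_non_select (cnc, stmt, NULL, &last_row, NULL) != -1)
    && last_row) {
	/* last_row is a new GdaSet with one GdaHolder per column of the inserted row */
	g_object_unref (last_row);
}
g_object_unref (stmt);
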
diff --git a/doc/C/prov-notes.xml b/doc/C/prov-notes.xml
index abdd6a2..91f1292 100644
--- a/doc/C/prov-notes.xml
+++ b/doc/C/prov-notes.xml
@@ -41,6 +41,42 @@
     </para>
   </sect1>
 
+<sect1 id="provider_notes_sqlite"><title>For SQLite</title>
+    <para>
+      The following arguments are used to connect to an SQLite database:
+      <table frame="all">
+        <tgroup cols="3" colsep="1" rowsep="1" align="justify">
+          <thead>
+	    <row>
+              <entry>Argument name</entry>
+              <entry>Description</entry>
+              <entry>Required</entry>
+	    </row>
+          </thead>
+          <tbody>
+	    <row>
+              <entry>DB_NAME</entry>
+              <entry>Name of the database. This should be the name of the database file without
+	      the ".db" extension. However, when opening a database, if a file named exactly after
+	      the DB_NAME value exists, then that file is used (so beware: for example, if DB_NAME is
+	      set to "mydb" and a file named "mydb" exists but is not an SQLite database, an error will occur)</entry>
+              <entry>Yes</entry>
+	    </row>
+	    <row>
+              <entry>DB_DIR</entry>
+              <entry>The directory where the database file is; if not specified, the current working
+	      directory is used.</entry>
+              <entry>No</entry>
+	    </row>
+	  </tbody>
+	</tgroup>
+      </table>
+    </para>
+    <para>
+      Also refer to the <link linkend="limitations_sqlite">SQLite provider's limitations</link>.
+    </para>
+  </sect1>
+
   <sect1 id="provider_notes_sqlcipher"><title>For SQLCipher</title>
   <para>
     The SQLCipher database provider allows one to connect to a database encrypted using the
@@ -92,7 +128,8 @@ Opening connection 'c0' for: SQLCipher://DB_NAME=testcrypt
   </sect2>
 
   <para>
-    Also refer to the <link linkend="limitations_sqlcipher">SQLCipher provider's limitations</link>.
+    Also refer to the <link linkend="provider_notes_sqlite">SQLite provider's notes</link>
+    and the <link linkend="limitations_sqlcipher">SQLCipher provider's limitations</link>.
   </para>
   </sect1>
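
A minimal sketch using the DB_NAME and DB_DIR arguments described above (directory and database
name are illustrative; the open_connection() function of the blobtest.c example above does the same
with DB_DIR=. and DB_NAME=testblob):

GError *error = NULL;
GdaConnection *cnc;

/* DB_DIR=/tmp and DB_NAME=mydb make the provider use the /tmp/mydb.db file */
cnc = gda_connection_open_from_string ("SQLite", "DB_DIR=/tmp;DB_NAME=mydb", NULL,
				       GDA_CONNECTION_OPTIONS_NONE, &error);
if (!cnc)
	g_print ("Could not open connection: %s\n",
		 error && error->message ? error->message : "No detail");
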
   
diff --git a/doc/C/tmpl/gda-blob-op.sgml b/doc/C/tmpl/gda-blob-op.sgml
index 02ab1fb..878e399 100644
--- a/doc/C/tmpl/gda-blob-op.sgml
+++ b/doc/C/tmpl/gda-blob-op.sgml
@@ -69,7 +69,7 @@ gda_value_free (tmp_value);
 <para>
   For further information, see:
   <itemizedlist>
-    <listitem><para>the section about <link linkend="gen:blobs">Binary large objects (BLOBs)</link>'s
+    <listitem><para>the section about <link linkend="gen_blobs">Binary large objects (BLOBs)</link>'s
 	abstraction</para></listitem>
     <listitem><para><link linkend="libgda-provider-blobop">Virtual methods for Blob operations</link>
 	section for more information
diff --git a/libgda/gda-value.c b/libgda/gda-value.c
index afa5045..13b4c89 100644
--- a/libgda/gda-value.c
+++ b/libgda/gda-value.c
@@ -1196,11 +1196,11 @@ gda_value_new_blob (const guchar *val, glong size)
 }
 
 /**
- * gda_value_new_blob
- * @val: value to set for the new #GValue.
- * @size: the size of the memory pool pointer to by @val.
+ * gda_value_new_blob_from_file
+ * @filename: name of the file to manipulate
  *
- * Makes a new #GValue of type #GDA_TYPE_BLOB with the data contained by @val.
+ * Makes a new #GValue of type #GDA_TYPE_BLOB interfacing with the contents of the file
+ * named @filename
  *
  * Returns: the newly created #GValue.
  */
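
A minimal usage sketch of the renamed gda_value_new_blob_from_file() (the file name is illustrative;
the do_store() and do_fetch() functions of the blobtest.c example above show it in a full program):

GValue *value;
GdaBlob *blob;

/* creates a GDA_TYPE_BLOB GValue whose contents are backed by the "photo.png" file */
value = gda_value_new_blob_from_file ("photo.png");
blob = (GdaBlob*) gda_value_get_blob (value);
/* blob->op can now be passed to the gda_blob_op_*() functions */
gda_value_free (value);
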


