[chronojump/chronojump-importer] Uses more SQLite placeholders, adds copyright, improves user documentation.
- From: Carles Pina i Estany <carlespina src gnome org>
- To: commits-list gnome org
- Cc:
- Subject: [chronojump/chronojump-importer] Uses more SQLite placeholders, adds copyright, improves user documentation.
- Date: Mon, 5 Sep 2016 18:09:50 +0000 (UTC)
commit 7603b8280ebebe54473860a7f8399973a780f654
Author: Carles Pina i Estany <carles pina cat>
Date: Mon Sep 5 19:09:24 2016 +0100
Uses more SQLite placeholders, adds copyright, improves user documentation.
src/chronojump-importer/chronojump_importer.py | 118 ++++++++++++++++++------
1 files changed, 89 insertions(+), 29 deletions(-)
---
diff --git a/src/chronojump-importer/chronojump_importer.py b/src/chronojump-importer/chronojump_importer.py
index 97e254a..6361098 100755
--- a/src/chronojump-importer/chronojump_importer.py
+++ b/src/chronojump-importer/chronojump_importer.py
@@ -4,14 +4,35 @@ import copy
import argparse
import sqlite3
import logging
-
import sys
-import pprint
logging.basicConfig(level=logging.INFO)
+"""
+/*
+ * This file is part of ChronoJump
+ *
+ * ChronoJump is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * ChronoJump is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Copyright (C) 2016 Carles Pina
+ */
+"""
def get_column_names(cursor, table, skip_columns = []):
+ """ Returns the column names of table. Doesn't return any columns
+ indicated by skip_columns. """
cursor.execute("PRAGMA table_info({})".format(table))
result = cursor.fetchall()
@@ -37,6 +58,7 @@ def remove_elements(list_of_elements, elements_to_remove):
def add_prefix(list_of_elements, prefix):
+ """ Returns a copy of list_of_elements prefixing each element with prefix. """
result = []
for element in list_of_elements:
@@ -46,9 +68,22 @@ def add_prefix(list_of_elements, prefix):
def insert_data_into_table(cursor, table_name, data, matches_columns):
- """ Inserts data (list of dictionaries) into table_name for database.
- Returns a copy of data and adds new_unique_id. This is the new uniqueid or an
- existing one if a register with matches_columns already existed. """
+ """ Data is a list of dictionaries and the keys should match the columns
+ of table_name.
+
+ Inserts the data and returns a copy of data with a new key per each
+ dictionary: new_unique_id. This is the new uniqueID for this row if it
+ didn't exist or the old one. The matching is based on matches_columns.
+
+ For example, if matches_columns = ["Name"] it will insert a new row
+ in the table if the name didn't exist and will add new_unique_id
+ with this unique id.
+ If name already existed it will NOT insert anything in the table
+ but will add a new_unique_id with the ID of this person.
+
+ If matches_columns is None it means that will insert the data
+ regardless of any column.
+ """
data_result = copy.deepcopy(data)
@@ -69,29 +104,32 @@ def insert_data_into_table(cursor, table_name, data, matches_columns):
format_data['table_name'] = table_name
format_data['where_clause'] = " WHERE {}".format(where)
sql = "SELECT uniqueID FROM {table_name} {where_clause}".format(**format_data)
- cursor.execute(sql, where_values)
+ execute_query_and_log(cursor, sql, where_values)
results = cursor.fetchall()
if matches_columns is None or len(results) == 0:
- # Needs to insert
- insert_dictionary_into_table(cursor, table_name, row)
- new_id = cursor.lastrowid
- row['new_unique_id'] = new_id
+ # Needs to insert it
+ new_id = insert_dictionary_into_table(cursor, table_name, row)
+
else:
- # Returns uniqueid
- row['new_unique_id'] = results[0][0]
+ # Uses the existing id as new_unique_id
+ new_id = results[0][0]
+
+ row['new_unique_id'] = new_id
return data_result
def get_data_from_table(cursor, table_name, where_condition, join_clause ="", group_by_clause=""):
- """ Returns a list of lists of the database, table executing where and skips the columns. """
+ """ Returns a list of dictionaries of the table table_name applying the where_condition, join_clause and
group_by_clause. """
column_names = get_column_names(cursor, table_name)
column_names_with_prefixes = add_prefix(column_names, "{}.".format(table_name))
where_condition = " WHERE {} ".format(where_condition)
+ assert '"' not in where_condition # Easy way to avoid problems - where_condition is only used by us
(programmers) and
+ # it doesn't depend on user data.
if group_by_clause != "":
group_by = " GROUP BY {}".format(group_by_clause)
@@ -101,7 +139,7 @@ def get_data_from_table(cursor, table_name, where_condition, join_clause ="", gr
format_data = {"column_names": ",".join(column_names_with_prefixes), "table_name": table_name,
"join_clause": join_clause, "where": where_condition, "group_by": group_by}
sql = "SELECT {column_names} FROM {table_name} {join_clause} {where} {group_by}".format(**format_data)
- execute_and_log(cursor, sql)
+ execute_query_and_log(cursor, sql, [])
results = cursor.fetchall()
@@ -117,6 +155,9 @@ def get_data_from_table(cursor, table_name, where_condition, join_clause ="", gr
def insert_dictionary_into_table(cursor, table_name, row, skip_columns=["uniqueID"]):
+ """ Inserts the row (it's a dictionary) into table_name and skips skip_column.
+ Returns the new Id of the inserted row.
+ """
values = []
column_names = []
place_holders = []
@@ -131,13 +172,18 @@ def insert_dictionary_into_table(cursor, table_name, row, skip_columns=["uniqueI
sql = "INSERT INTO {table_name} ({column_names}) VALUES ({place_holders})".format(table_name=table_name,
column_names=",".join(column_names),
place_holders=",".join(place_holders))
+ execute_query_and_log(cursor, sql, values)
- cursor.execute(sql, values)
+ new_id = cursor.lastrowid
- return sql
+ return new_id
def update_persons77_ids(table, persons77_list):
+ """ table argument is a list of dictionaries. It returns a copy of it
+ replacing each personID by the 'new' personID contained in persons77_list.
+ persons77_list is a list of dictionaries and they must contain uniqueID (old person ID) and
+ new_unique_id -the new one in the new database. """
result = copy.deepcopy(table)
for row in table:
@@ -150,6 +196,9 @@ def update_persons77_ids(table, persons77_list):
def update_session_ids(table, new_session_id):
+ """ table argument is a list of dictionaries. It returns a copy of it
+ replacing each sessionID by new_session_id.
+ """
result = copy.deepcopy(table)
changed = False
@@ -176,6 +225,26 @@ def import_database(source_path, destination_path, source_session):
source_cursor = source_db.cursor()
destination_cursor = destination_db.cursor()
+ # Imports the session
+ session = get_data_from_table(cursor=source_cursor, table_name="Session",
+ where_condition="Session.uniqueID={}".format(source_session))
+
+ number_of_matching_sessions = len(session)
+
+ if number_of_matching_sessions == 0:
+ print("Trying to import {session} from {source_file} and it doesn't exist.
Cancelling...".format(session=source_session,
+
source_file=source_path))
+ sys.exit(1)
+ elif number_of_matching_sessions > 1:
+ print("Found {number_of_sessions} in {source_file} which is not possible.
Cancelling...".format(number_of_sessions=number_of_matching_sessions,
+
source_file=source_path))
+ sys.exit(1)
+
+ session = insert_data_into_table(cursor=destination_cursor, table_name="Session", data=session,
+ matches_columns=get_column_names(destination_cursor, "Session",
["uniqueID"]))
+
+ new_session_id = session[0]['new_unique_id']
+
# Imports JumpType table
jump_types = get_data_from_table(cursor=source_cursor, table_name="JumpType",
where_condition="Session.uniqueID={}".format(source_session),
@@ -194,15 +263,6 @@ def import_database(source_path, destination_path, source_session):
insert_data_into_table(cursor=destination_cursor, table_name="JumpRjType", data=jump_rj_types,
matches_columns=get_column_names(destination_cursor, "JumpRjType", ["uniqueID"]))
- # Imports the session
- session = get_data_from_table(cursor=source_cursor, table_name="Session",
- where_condition="Session.uniqueID={}".format(source_session))
-
- session = insert_data_into_table(cursor=destination_cursor, table_name="Session", data=session,
- matches_columns=get_column_names(destination_cursor, "Session",
["uniqueID"]))
-
- new_session_id = session[0]['new_unique_id']
-
# Imports Persons77 used by JumpRj table
persons77_jump_rj = get_data_from_table(cursor=source_cursor, table_name="Person77",
where_condition="JumpRj.sessionID={}".format(source_session),
@@ -271,13 +331,13 @@ def open_database(filename, read_only):
return conn
-def execute_and_log(cursor, sql, comment = ""):
- logging.debug("SQL: {} -- {}".format(sql,comment))
- cursor.execute(sql)
+def execute_query_and_log(cursor, sql, where_values):
+ logging.debug("SQL: {} - values: {}".format(sql, where_values))
+ cursor.execute(sql, where_values)
def process_command_line():
- parser = argparse.ArgumentParser(description="Process some integers.")
+ parser = argparse.ArgumentParser(description="Allows to import a session from one Chronojump database
file into another one")
parser.add_argument("--source", type=str, required=True,
help="chronojump.sqlite that we are importing from")
parser.add_argument("--destination", type=str, required=True,
[Date Prev][Date Next]
[Thread Prev][Thread Next]
[Thread Index]
[Date Index]
[Author Index]